code
stringlengths 87
55.2k
| code_codestyle
int64 0
349
| style_context
stringlengths 135
49.1k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
---|---|---|---|---|
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup  # fixed: "bsa" is a corruption of the bs4 package name
from fake_useragent import UserAgent

if __name__ == "__main__":
    # Build the query from CLI args, or prompt interactively (URL-quoted).
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    # Randomized User-Agent so Google serves a normal HTML results page.
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        # Desktop layout: the first organic result anchor lives under div.yuRUbf.
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        # Lightweight layout fallback: href is a "/url?q=..." redirect;
        # pull the real target out of the "url" query parameter.
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
| 5 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger for this conversion script.
# NOTE(review): obfuscated dump — the logger was bound to `UpperCAmelCase__`
# instead of its original name (presumably `logger`).
UpperCAmelCase__ = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    """Build a MobileViTConfig matching the named original checkpoint.

    Args:
        mobilevit_name: checkpoint name, e.g. "mobilevit_s" or
            "deeplabv3_mobilevit_xs".

    Returns:
        A populated ``MobileViTConfig``.

    NOTE(review): the dump dropped all assignment targets; attribute names
    below are reconstructed from the upstream conversion script — confirm.
    """
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        # xxs variant uses lighter regularization and a smaller expansion ratio
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        # Semantic-segmentation head: PASCAL VOC (21 classes incl. background).
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    """Translate one original (ml-cvnets) MobileViT weight name into HF layout.

    Args:
        name: parameter name from the original checkpoint.
        base_model: if True, names are not prefixed with "mobilevit."
            (used when converting the bare backbone).

    Returns:
        The renamed parameter name.
    """
    # "layer_1." .. "layer_5." -> zero-based "encoder.layer.0." .. ".4."
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    # MobileNet stages 0-1: sub-block j becomes a "layer.j" child.
    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    # MobileViT stages 2-5: the sub-block index is dropped entirely.
    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")

    if "expand_1x1" in name:
        name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
    if "conv_3x3" in name:
        name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
    if "reduce_1x1" in name:
        name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    # global_rep indices 2-4 are the trailing LayerNorm; the rest is the transformer.
    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")
    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")

    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        # Backbone weights inside the task model live under the "mobilevit." submodule.
        name = "mobilevit." + name

    return name
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case=False ) -> Optional[Any]:
    """Rewrite an original checkpoint state dict into the HF MobileViT layout.

    Positional args are (orig_state_dict, model, base_model=False) — the
    obfuscated signature lost the real parameter names.

    NOTE(review): machine-obfuscated dump — every assignment target was
    rewritten to `_lowercase`, so the popped value, the split q/k/v slices and
    the renamed keys are never stored back into `orig_state_dict`; presumably
    each `_lowercase =` line originally bound a named local (`val`,
    `key_split`, `layer_num`, `dim`, ...) or an `orig_state_dict[...]` entry.
    Broken as-is; kept byte-identical pending the upstream source.
    """
    if base_model:
        _lowercase =''''''
    else:
        _lowercase ='''mobilevit.'''
    for key in orig_state_dict.copy().keys():
        # pop the tensor for this key (original likely bound it as `val`)
        _lowercase =orig_state_dict.pop(__snake_case )
        if key[:8] == "encoder.":
            _lowercase =key[8:]
        if "qkv" in key:
            # fused qkv weight/bias: split into query / key / value tensors
            _lowercase =key.split('''.''' )
            _lowercase =int(key_split[0][6:] ) - 1
            _lowercase =int(key_split[3] )
            _lowercase =model.get_submodule(F"{model_prefix}encoder.layer.{layer_num}" )
            _lowercase =layer.transformer.layer[transformer_num].attention.attention.all_head_size
            _lowercase =(
                F"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                # rows [0:dim) = query, [dim:2*dim) = key, last dim rows = value
                _lowercase =val[:dim, :]
                _lowercase =val[dim : dim * 2, :]
                _lowercase =val[-dim:, :]
            else:
                _lowercase =val[:dim]
                _lowercase =val[dim : dim * 2]
                _lowercase =val[-dim:]
        else:
            _lowercase =val
    return orig_state_dict
def prepare_img():
    """Download and return the standard COCO "two cats" test image.

    Returns:
        A ``PIL.Image.Image`` used to sanity-check converted model outputs.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True so PIL can decode straight from the response file object.
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case=False ) -> int:
    """Convert an original MobileViT checkpoint to HF format and save it.

    Positional args are (mobilevit_name, checkpoint_path,
    pytorch_dump_folder_path, push_to_hub=False).

    NOTE(review): machine-obfuscated dump — the `_lowercase =` lines lost
    their real targets (config, state_dict, model, image_processor, outputs,
    logits, expected_logits, ...), so this function raises NameError as-is;
    the reads below still use the original local names. Kept byte-identical.
    """
    _lowercase =get_mobilevit_config(__snake_case )
    # load original state_dict
    _lowercase =torch.load(__snake_case , map_location='''cpu''' )
    # load 🤗 model
    if mobilevit_name.startswith('''deeplabv3_''' ):
        _lowercase =MobileViTForSemanticSegmentation(__snake_case ).eval()
    else:
        _lowercase =MobileViTForImageClassification(__snake_case ).eval()
    _lowercase =convert_state_dict(__snake_case , __snake_case )
    model.load_state_dict(__snake_case )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    _lowercase =MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    _lowercase =image_processor(images=prepare_img() , return_tensors='''pt''' )
    _lowercase =model(**__snake_case )
    _lowercase =outputs.logits
    # Compare a 3x3(x3) corner of the logits against reference values
    # captured from the original repository for each checkpoint.
    if mobilevit_name.startswith('''deeplabv3_''' ):
        assert logits.shape == (1, 21, 32, 32)
        if mobilevit_name == "deeplabv3_mobilevit_s":
            _lowercase =torch.tensor(
                [
                    [[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
                    [[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
                    [[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            _lowercase =torch.tensor(
                [
                    [[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
                    [[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
                    [[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            _lowercase =torch.tensor(
                [
                    [[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
                    [[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
                    [[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
                ] )
        else:
            raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
        assert torch.allclose(logits[0, :3, :3, :3] , __snake_case , atol=1e-4 )
    else:
        assert logits.shape == (1, 1000)
        if mobilevit_name == "mobilevit_s":
            _lowercase =torch.tensor([-0.98_66, 0.23_92, -1.12_41] )
        elif mobilevit_name == "mobilevit_xs":
            _lowercase =torch.tensor([-2.47_61, -0.93_99, -1.95_87] )
        elif mobilevit_name == "mobilevit_xxs":
            _lowercase =torch.tensor([-1.93_64, -1.23_27, -0.46_53] )
        else:
            raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
        assert torch.allclose(logits[0, :3] , __snake_case , atol=1e-4 )
    # Persist model + processor to the dump folder.
    Path(__snake_case ).mkdir(exist_ok=__snake_case )
    print(F"Saving model {mobilevit_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(__snake_case )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(__snake_case )
    if push_to_hub:
        # checkpoint name -> hub repo name under the "apple" organization
        _lowercase ={
            '''mobilevit_s''': '''mobilevit-small''',
            '''mobilevit_xs''': '''mobilevit-x-small''',
            '''mobilevit_xxs''': '''mobilevit-xx-small''',
            '''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
            '''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
            '''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
        }
        print('''Pushing to the hub...''' )
        _lowercase =model_mapping[mobilevit_name]
        image_processor.push_to_hub(__snake_case , organization='''apple''' )
        model.push_to_hub(__snake_case , organization='''apple''' )
if __name__ == "__main__":
    # NOTE(review): obfuscated — the parser and the parsed namespace were bound
    # to `UpperCAmelCase__` while the code below reads `parser` / `args`, and
    # the entry point `convert_movilevit_checkpoint` (note the "movilevit"
    # typo) is not defined in this file. Kept byte-identical.
    UpperCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--mobilevit_name''',
        default='''mobilevit_s''',
        type=str,
        help=(
            '''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
            ''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
        ),
    )
    parser.add_argument(
        '''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )
    UpperCAmelCase__ = parser.parse_args()
    convert_movilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 5 | 1 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase__ :
    """Builds tiny LiLT configs and dummy inputs for the unit tests below.

    NOTE(review): machine-obfuscated dump — the class name collides with other
    `lowerCamelCase__` classes in this file, and the `_lowercase =` lines lost
    their real targets (presumably `self.<attr>` / named locals: the methods
    read `self.batch_size`, `bbox`, `config`, ...). Broken as-is; kept
    byte-identical with review comments only.
    """
    def __init__(self , UpperCAmelCase , UpperCAmelCase=1_3 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=9_9 , UpperCAmelCase=2_4 , UpperCAmelCase=2 , UpperCAmelCase=6 , UpperCAmelCase=3_7 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=5_1_2 , UpperCAmelCase=1_6 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=3 , UpperCAmelCase=None , UpperCAmelCase=1_0_0_0 , ) -> List[str]:
        # presumably each line assigned self.<attr> = <same-named parameter>
        _lowercase =parent
        _lowercase =batch_size
        _lowercase =seq_length
        _lowercase =is_training
        _lowercase =use_input_mask
        _lowercase =use_token_type_ids
        _lowercase =use_labels
        _lowercase =vocab_size
        _lowercase =hidden_size
        _lowercase =num_hidden_layers
        _lowercase =num_attention_heads
        _lowercase =intermediate_size
        _lowercase =hidden_act
        _lowercase =hidden_dropout_prob
        _lowercase =attention_probs_dropout_prob
        _lowercase =max_position_embeddings
        _lowercase =type_vocab_size
        _lowercase =type_sequence_label_size
        _lowercase =initializer_range
        _lowercase =num_labels
        _lowercase =scope
        _lowercase =range_bbox

    def __A (self ) -> List[Any]:
        # prepare_config_and_inputs: random ids + a legal bbox tensor
        _lowercase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _lowercase =ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                # swap coordinates so x1<=x2 and y1<=y2
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    _lowercase =bbox[i, j, 3]
                    _lowercase =bbox[i, j, 1]
                    _lowercase =t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    _lowercase =bbox[i, j, 2]
                    _lowercase =bbox[i, j, 0]
                    _lowercase =t
        _lowercase =None
        if self.use_input_mask:
            _lowercase =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        _lowercase =None
        if self.use_token_type_ids:
            _lowercase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _lowercase =None
        _lowercase =None
        if self.use_labels:
            _lowercase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _lowercase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        _lowercase =self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def __A (self ) -> str:
        # get_config
        return LiltConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )

    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> str:
        # create_and_check_model
        _lowercase =LiltModel(config=UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        _lowercase =model(UpperCAmelCase , bbox=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase )
        _lowercase =model(UpperCAmelCase , bbox=UpperCAmelCase , token_type_ids=UpperCAmelCase )
        _lowercase =model(UpperCAmelCase , bbox=UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Tuple:
        # create_and_check_for_token_classification
        _lowercase =self.num_labels
        _lowercase =LiltForTokenClassification(config=UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        _lowercase =model(
            UpperCAmelCase , bbox=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Tuple:
        # create_and_check_for_question_answering
        _lowercase =LiltForQuestionAnswering(config=UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        _lowercase =model(
            UpperCAmelCase , bbox=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __A (self ) -> int:
        # prepare_config_and_inputs_for_common: unpack and build the kwargs dict
        _lowercase =self.prepare_config_and_inputs()
        (
            (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) ,
        ) =config_and_inputs
        _lowercase ={
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class lowerCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase):
    """Common model / pipeline test-suite for LiLT.

    NOTE(review): obfuscated — the three `lowerCAmelCase` bases are undefined
    here (presumably ModelTesterMixin, GenerationTesterMixin,
    PipelineTesterMixin), the `SCREAMING_SNAKE_CASE__` class attributes lost
    their real names (all_model_classes, pipeline_model_mapping, flags), and
    `LiltModelTester` is not defined under that name in this dump.
    """
    SCREAMING_SNAKE_CASE__ = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    SCREAMING_SNAKE_CASE__ = (
        {
            '''feature-extraction''': LiltModel,
            '''question-answering''': LiltForQuestionAnswering,
            '''text-classification''': LiltForSequenceClassification,
            '''token-classification''': LiltForTokenClassification,
            '''zero-shot''': LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = False

    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
        # presumably is_pipeline_test_to_skip: skip every pipeline test
        return True

    def __A (self ) -> int:
        # setUp
        _lowercase =LiltModelTester(self )
        _lowercase =ConfigTester(self , config_class=UpperCAmelCase , hidden_size=3_7 )

    def __A (self ) -> Union[str, Any]:
        self.config_tester.run_common_tests()

    def __A (self ) -> str:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase )

    def __A (self ) -> Any:
        # re-run the model test under each position-embedding type
        _lowercase =self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            _lowercase =type
            self.model_tester.create_and_check_model(*UpperCAmelCase )

    def __A (self ) -> Tuple:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )

    def __A (self ) -> Dict:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )

    @slow
    def __A (self ) -> List[Any]:
        # smoke-test loading the first pretrained checkpoint
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowercase =LiltModel.from_pretrained(UpperCAmelCase )
            self.assertIsNotNone(UpperCAmelCase )
@require_torch
@slow
class lowerCamelCase__ ( unittest.TestCase):
    """Integration test: forward pass of the pretrained LiLT checkpoint.

    NOTE(review): obfuscated — `UpperCAmelCase` is read as a global here
    (presumably `torch_device`), and the `_lowercase =` bindings lost their
    real local names (model, input_ids, bbox, outputs, expected slice).
    """
    def __A (self ) -> str:
        _lowercase =LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(UpperCAmelCase )
        _lowercase =torch.tensor([[1, 2]] , device=UpperCAmelCase )
        _lowercase =torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=UpperCAmelCase )
        # forward pass
        with torch.no_grad():
            _lowercase =model(input_ids=UpperCAmelCase , bbox=UpperCAmelCase )
        _lowercase =torch.Size([1, 2, 7_6_8] )
        _lowercase =torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=UpperCAmelCase , )
        self.assertTrue(outputs.last_hidden_state.shape , UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , UpperCAmelCase , atol=1e-3 ) )
| 5 |
import requests
from bs4 import BeautifulSoup  # fixed: "bsa" is a corruption of the bs4 package name


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape worldometers and return {statistic name: value} for COVID-19.

    The page pairs <h1>/<span class="panel-title"> headings with
    <div class="maincounter-number">/<div class="number-table-main"> values,
    in matching order, so zipping the two lists aligns keys with values.
    """
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
| 5 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowerCamelCase__ ( lowerCAmelCase , lowerCAmelCase):
    """Variance-preserving SDE scheduler (score_sde_pytorch-style predictor).

    NOTE(review): obfuscated — the two `lowerCAmelCase` bases are undefined
    here (presumably SchedulerMixin and ConfigMixin), and the `_lowercase =`
    lines lost their real targets (`self.timesteps` in __init__/set_timesteps,
    named locals in step_pred), so the class is broken as-is.
    """
    SCREAMING_SNAKE_CASE__ = 1  # presumably the scheduler `order`

    @register_to_config
    def __init__(self , UpperCAmelCase=2_0_0_0 , UpperCAmelCase=0.1 , UpperCAmelCase=2_0 , UpperCAmelCase=1e-3 ) -> List[str]:
        # args presumably: num_train_timesteps, beta_min, beta_max, sampling_eps
        _lowercase =None
        _lowercase =None
        _lowercase =None

    def __A (self , UpperCAmelCase , UpperCAmelCase = None ) -> str:
        # set_timesteps: t goes from 1 down to sampling_eps
        _lowercase =torch.linspace(1 , self.config.sampling_eps , UpperCAmelCase , device=UpperCAmelCase )

    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None ) -> Optional[int]:
        # step_pred(score, x, t, generator=None): one reverse-SDE Euler step
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        _lowercase =(
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        _lowercase =torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        _lowercase =std.flatten()
        # broadcast std to the score's rank
        while len(std.shape ) < len(score.shape ):
            _lowercase =std.unsqueeze(-1 )
        _lowercase =-score / std
        # compute
        _lowercase =-1.0 / len(self.timesteps )
        _lowercase =self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        _lowercase =beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            _lowercase =beta_t.unsqueeze(-1 )
        _lowercase =-0.5 * beta_t * x
        _lowercase =torch.sqrt(UpperCAmelCase )
        _lowercase =drift - diffusion**2 * score
        _lowercase =x + drift * dt
        # add noise
        _lowercase =randn_tensor(x.shape , layout=x.layout , generator=UpperCAmelCase , device=x.device , dtype=x.dtype )
        _lowercase =x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean

    def __len__(self ) -> str:
        return self.config.num_train_timesteps
| 5 |
from typing import TYPE_CHECKING

from ..utils import _LazyModule

# Submodule -> exported names, consumed by _LazyModule below.
# Fixed: the dict was bound to an obfuscated name while _LazyModule read
# `_import_structure`, and the class name is OnnxSeq2SeqConfigWithPast
# (the TYPE_CHECKING import had a corrupted spelling).
_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    # Real imports only for static type checkers; at runtime the module is lazy.
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 5 | 1 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class lowerCamelCase__ :
    """Builds tiny TF RoFormer configs and dummy inputs for the tests below.

    NOTE(review): machine-obfuscated dump — `__init__` ignores every parameter
    and the `_lowercase =` lines lost their `self.<attr>` / local targets (the
    methods read `self.batch_size`, `config`, `result`, ...); the class name
    also collides with other `lowerCamelCase__` classes in this file. Broken
    as-is; kept byte-identical with review comments only.
    """
    def __init__(self , UpperCAmelCase , UpperCAmelCase=1_3 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=9_9 , UpperCAmelCase=3_2 , UpperCAmelCase=2 , UpperCAmelCase=4 , UpperCAmelCase=3_7 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=5_1_2 , UpperCAmelCase=1_6 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , ) -> str:
        # hard-coded values mirror the defaults; presumably each line was
        # self.<attr> = <value>
        _lowercase =parent
        _lowercase =1_3
        _lowercase =7
        _lowercase =True
        _lowercase =True
        _lowercase =True
        _lowercase =True
        _lowercase =9_9
        _lowercase =3_2
        _lowercase =2
        _lowercase =4
        _lowercase =3_7
        _lowercase ='''gelu'''
        _lowercase =0.1
        _lowercase =0.1
        _lowercase =5_1_2
        _lowercase =1_6
        _lowercase =2
        _lowercase =0.02
        _lowercase =3
        _lowercase =4
        _lowercase =None

    def __A (self ) -> Any:
        # prepare_config_and_inputs
        _lowercase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _lowercase =None
        if self.use_input_mask:
            _lowercase =random_attention_mask([self.batch_size, self.seq_length] )
        _lowercase =None
        if self.use_token_type_ids:
            _lowercase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _lowercase =None
        _lowercase =None
        _lowercase =None
        if self.use_labels:
            _lowercase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _lowercase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _lowercase =ids_tensor([self.batch_size] , self.num_choices )
        _lowercase =RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
        # create_and_check_model
        _lowercase =TFRoFormerModel(config=UpperCAmelCase )
        _lowercase ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        _lowercase =[input_ids, input_mask]
        _lowercase =model(UpperCAmelCase )
        _lowercase =model(UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
        # create_and_check_lm_head (causal LM)
        _lowercase =True
        _lowercase =TFRoFormerForCausalLM(config=UpperCAmelCase )
        _lowercase ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        _lowercase =model(UpperCAmelCase )['''logits''']
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )

    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple:
        # create_and_check_for_masked_lm
        _lowercase =TFRoFormerForMaskedLM(config=UpperCAmelCase )
        _lowercase ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        _lowercase =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
        # create_and_check_for_sequence_classification
        _lowercase =self.num_labels
        _lowercase =TFRoFormerForSequenceClassification(config=UpperCAmelCase )
        _lowercase ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        _lowercase =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any:
        # create_and_check_for_multiple_choice: tile inputs across num_choices
        _lowercase =self.num_choices
        _lowercase =TFRoFormerForMultipleChoice(config=UpperCAmelCase )
        _lowercase =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        _lowercase =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        _lowercase =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        _lowercase ={
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        _lowercase =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
        # create_and_check_for_token_classification
        _lowercase =self.num_labels
        _lowercase =TFRoFormerForTokenClassification(config=UpperCAmelCase )
        _lowercase ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        _lowercase =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
        # create_and_check_for_question_answering
        _lowercase =TFRoFormerForQuestionAnswering(config=UpperCAmelCase )
        _lowercase ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        _lowercase =model(UpperCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __A (self ) -> int:
        # prepare_config_and_inputs_for_common: unpack and build the kwargs dict
        _lowercase =self.prepare_config_and_inputs()
        (
            (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) ,
        ) =config_and_inputs
        _lowercase ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class lowerCamelCase__ ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase):
    """Common model / pipeline test-suite for TF RoFormer.

    NOTE(review): obfuscated — the `lowerCAmelCase` bases are undefined here
    (presumably TFModelTesterMixin and PipelineTesterMixin), the
    `SCREAMING_SNAKE_CASE__` attributes lost their real names
    (all_model_classes, pipeline_model_mapping, flags), and
    `TFRoFormerModelTester` is not defined under that name in this dump.
    """
    SCREAMING_SNAKE_CASE__ = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    SCREAMING_SNAKE_CASE__ = (
        {
            '''feature-extraction''': TFRoFormerModel,
            '''fill-mask''': TFRoFormerForMaskedLM,
            '''question-answering''': TFRoFormerForQuestionAnswering,
            '''text-classification''': TFRoFormerForSequenceClassification,
            '''text-generation''': TFRoFormerForCausalLM,
            '''token-classification''': TFRoFormerForTokenClassification,
            '''zero-shot''': TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = False

    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
        # presumably is_pipeline_test_to_skip: only skip the text-generation pipeline
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False

    def __A (self ) -> Tuple:
        # setUp
        _lowercase =TFRoFormerModelTester(self )
        _lowercase =ConfigTester(self , config_class=UpperCAmelCase , hidden_size=3_7 )

    def __A (self ) -> str:
        self.config_tester.run_common_tests()

    def __A (self ) -> List[Any]:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase )

    def __A (self ) -> Dict:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )

    def __A (self ) -> Any:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*UpperCAmelCase )

    def __A (self ) -> List[Any]:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )

    def __A (self ) -> Tuple:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )

    def __A (self ) -> Any:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )

    def __A (self ) -> List[str]:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )

    @slow
    def __A (self ) -> Optional[int]:
        # smoke-test loading the pretrained checkpoint
        _lowercase =TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
        self.assertIsNotNone(UpperCAmelCase )
@require_tf
class lowerCamelCase__ ( unittest.TestCase):
    """Integration test: run the pretrained RoFormer MLM head on a short input
    and compare a slice of the output logits to pinned reference values.

    NOTE(review): local names appear mangled to ``_lowercase``; later lines read
    ``output``/``vocab_size`` which are never bound here -- confirm upstream.
    """
    @slow
    def __A (self ) -> Dict:
        _lowercase =TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
        _lowercase =tf.constant([[0, 1, 2, 3, 4, 5]] )
        _lowercase =model(UpperCAmelCase )[0]
        # TODO Replace vocab size
        _lowercase =5_0_0_0_0
        # Expected logits shape: (batch=1, seq_len=6, vocab_size).
        _lowercase =[1, 6, vocab_size]
        self.assertEqual(output.shape , UpperCAmelCase )
        print(output[:, :3, :3] )
        # TODO Replace values below with what was printed above.
        _lowercase =tf.constant(
            [
                [
                    [-0.1205_3341, -1.026_4901, 0.2922_1946],
                    [-1.513_3783, 0.19_7433, 0.1519_0607],
                    [-5.013_5403, -3.90_0256, -0.8403_8764],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
@require_tf
class lowerCamelCase__ ( unittest.TestCase):
    """Unit tests for TFRoFormerSinusoidalPositionalEmbedding: checks the first
    rows of the sinusoidal table against hand-computed values.

    NOTE(review): locals appear mangled to ``_lowercase``; ``emba``/``input_ids``
    are read but never bound, and ``self.tolerance`` is read while the constant
    below was assigned to ``SCREAMING_SNAKE_CASE__`` -- confirm upstream.
    """
    # Numerical tolerance for the embedding comparisons.
    SCREAMING_SNAKE_CASE__ = 1E-4
    def __A (self ) -> List[Any]:
        # Small table (6 positions x 6 dims): compare the first two rows.
        _lowercase =tf.constant([[4, 1_0]] )
        _lowercase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        _lowercase =emba(input_ids.shape )
        _lowercase =tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
        tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
    def __A (self ) -> Optional[Any]:
        # Large table (512 x 512): compare the top-left 3x5 corner of the weights.
        _lowercase =tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ] )
        _lowercase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2 )
        emba([2, 1_6, 5_1_2] )
        _lowercase =emba.weight[:3, :5]
        tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
@require_tf
class lowerCamelCase__ ( unittest.TestCase):
    """Unit test for TFRoFormerSelfAttention.apply_rotary_position_embeddings:
    rotates synthetic query/key tensors and pins a slice of each result.

    NOTE(review): locals appear mangled to ``_lowercase``; ``query_layer`` and
    ``key_layer`` are read at the end but never bound here -- confirm upstream.
    """
    # Numerical tolerance for the rotary-embedding comparisons.
    SCREAMING_SNAKE_CASE__ = 1E-4
    def __A (self ) -> List[Any]:
        # 2,12,16,64
        # Deterministic ramp tensors for query (positive) and key (negated).
        _lowercase =tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
        _lowercase =-tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
        _lowercase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 , embedding_dim=6_4 )
        _lowercase =embed_positions([2, 1_6, 7_6_8] )[None, None, :, :]
        _lowercase , _lowercase =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
        # Pinned reference slice for the rotated query.
        _lowercase =tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ] )
        # Pinned reference slice for the rotated key (sign-flipped input).
        _lowercase =tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
| 5 |
def UpperCAmelCase_ ( __snake_case ) -> str:
    """Return the longest palindromic substring of ``__snake_case``.

    Implements Manacher's algorithm in O(n): the input is interleaved with
    ``|`` separators so even- and odd-length palindromes are handled uniformly,
    then ``length[j]`` tracks the palindromic span centred at each position of
    the interleaved string.

    Fixes vs. the mangled original: local names (``new_input_string``,
    ``output_string``, ``l``/``r``/``start``/``max_length``) and the
    ``length[j] = ...`` write had lost their targets, the loop bounds used the
    raw input's length instead of the interleaved string's, and the empty
    string raised IndexError; it now returns "".
    """
    if not __snake_case:
        return ""
    max_length = 0
    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_string for range(0, length-1)
    for i in __snake_case[: len(__snake_case ) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += __snake_case[-1]
    # l..r is the previously found palindrome reaching furthest right
    l, r = 0, 0
    # length[j] = length of the palindromic substring centred at j
    length = [1 for i in range(len(new_input_string ) )]
    start = 0
    for j in range(len(new_input_string ) ):
        # seed k from the mirrored centre when j lies inside [l, r]
        k = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
        while (
            j - k >= 0
            and j + k < len(new_input_string )
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (r)?
        # if yes then update l and r to this palindrome's bounds
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # rebuild the answer, dropping the "|" separators
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 5 | 1 |
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
# Module-level logger for the EnCodec conversion script.
UpperCAmelCase__ = logging.get_logger('''transformers.models.encodec''')
# NOTE(review): every constant below is assigned to the same mangled name
# ``UpperCAmelCase__`` while later code reads MAPPING_QUANTIZER, MAPPING_ENCODER,
# MAPPING_24K, IGNORE_KEYS, ... -- the original distinct names appear to have
# been lost in this copy. Restore them before running the script.
# Quantizer: original checkpoint key pattern -> HF key pattern.
UpperCAmelCase__ = {
    '''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
    '''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
    '''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
    '''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
# Encoder convolution/LSTM key renames (shared by all variants).
UpperCAmelCase__ = {
    '''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
    '''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
    '''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
    '''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
    '''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
    '''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
    '''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
    '''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
    '''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
    '''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
    '''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
    '''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
    '''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
    '''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
    '''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
    '''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
    '''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
    '''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
    '''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
# Encoder normalisation key renames (48 kHz variant only).
UpperCAmelCase__ = {
    '''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
    '''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
    '''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
    '''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
    '''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
    '''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
    '''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
    '''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
    '''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
    '''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
    '''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
    '''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
    '''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
    '''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
    '''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
    '''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
    '''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
    '''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
# Decoder convolution/LSTM key renames (shared by all variants).
UpperCAmelCase__ = {
    '''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
    '''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
    '''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
    '''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
    '''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
    '''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
    '''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
    '''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
    '''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
    '''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
    '''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
    '''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
    '''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
    '''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
    '''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
    '''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
    '''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
    '''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
    '''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
# Decoder normalisation key renames (48 kHz variant only).
UpperCAmelCase__ = {
    '''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
    '''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
    '''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
    '''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
    '''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
    '''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
    '''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
    '''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
    '''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
    '''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
    '''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
    '''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
    '''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
    '''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
    '''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
    '''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
    '''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
    '''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
# Combined table for the 24 kHz / 32 kHz checkpoints (no norm layers).
UpperCAmelCase__ = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
# Combined table for the 48 kHz checkpoint (includes the norm layers).
UpperCAmelCase__ = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
# Keys copied verbatim / keys to skip entirely during conversion.
UpperCAmelCase__ = []
UpperCAmelCase__ = []
def UpperCAmelCase_ ( hf_pointer , key , value , full_name , weight_type ) -> None:
    """Walk ``hf_pointer`` along the dotted ``key`` and copy ``value`` into the
    attribute selected by ``weight_type`` (or into the module itself when
    ``weight_type`` is None).

    Fixes vs. the mangled original: the five parameters were all named
    ``__snake_case`` (a SyntaxError), the getattr walk did not re-bind the
    pointer, and every per-branch ``... = value`` write had lost its target.
    Targets are reconstructed from the branch conditions themselves
    (``weight_type == "weight"`` -> ``hf_pointer.weight.data``, etc.).

    Raises ValueError when the destination shape does not match ``value``.
    """
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            F" {value.shape} for {full_name}" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(F"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." )
def UpperCAmelCase_ ( name , ignore_keys ) -> bool:
    """Return True when ``name`` matches any pattern in ``ignore_keys``.

    Three pattern forms are supported:
    * ``"prefix.*"``        -- ``name`` starts with ``"prefix."``;
    * ``"prefix.*.suffix"`` -- both fragments occur somewhere in ``name``;
    * plain string          -- the string occurs somewhere in ``name``.

    Fix vs. the mangled original: both parameters were named ``__snake_case``
    (a SyntaxError) while the body already reads ``name`` and ``ignore_keys``;
    the intended names are restored.
    """
    for key in ignore_keys:
        if key.endswith('''.*''' ):
            # "prefix.*": match on the prefix, keeping the trailing dot.
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            # "prefix.*.suffix": both fragments must appear in the name.
            prefix, suffix = key.split('''.*.''' )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def UpperCAmelCase_ ( orig_dict , hf_model , model_name ) -> None:
    """Rename every key of ``orig_dict`` through the model-specific mapping
    table and load the tensors into ``hf_model``; log any unmatched keys.

    Fixes vs. the mangled original:
    * the three parameters were all named ``__snake_case`` (a SyntaxError) --
      names restored from how the body uses them;
    * ``model_name == "encodec_24khz" or "encodec_32khz"`` was always truthy
      (a bare non-empty string), making the 48 kHz branch unreachable --
      replaced with a proper membership test;
    * the selected table is bound to a local ``mapping`` and iterated instead
      of being discarded.
    """
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        mapping = MAPPING_24K
    elif model_name == "encodec_48khz":
        mapping = MAPPING_48K
    else:
        raise ValueError(F"Unsupported model: {model_name}" )
    for name, value in orig_dict.items():
        # NOTE(review): IGNORE_KEYS is the module-level skip list -- confirm the
        # restored name against the upstream script.
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(F"{name} was ignored" )
            continue
        is_used = False
        for key, mapped_key in mapping.items():
            if "*" in key:
                prefix, suffix = key.split('''.*.''' )
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ):
                    continue
                is_used = True
                if "*" in mapped_key:
                    # splice the layer index from the original key into the target
                    layer_index = name.split(key )[0].split('''.''' )[-2]
                    mapped_key = mapped_key.replace('''*''' , layer_index )
                # classify which parameter of the target module this tensor fills
                if "weight_g" in name:
                    weight_type = '''weight_g'''
                elif "weight_v" in name:
                    weight_type = '''weight_v'''
                elif "weight_ih_l0" in name:
                    weight_type = '''weight_ih_l0'''
                elif "weight_hh_l0" in name:
                    weight_type = '''weight_hh_l0'''
                elif "bias_ih_l0" in name:
                    weight_type = '''bias_ih_l0'''
                elif "bias_hh_l0" in name:
                    weight_type = '''bias_hh_l0'''
                elif "weight_ih_l1" in name:
                    weight_type = '''weight_ih_l1'''
                elif "weight_hh_l1" in name:
                    weight_type = '''weight_hh_l1'''
                elif "bias_ih_l1" in name:
                    weight_type = '''bias_ih_l1'''
                elif "bias_hh_l1" in name:
                    weight_type = '''bias_hh_l1'''
                elif "bias" in name:
                    weight_type = '''bias'''
                elif "weight" in name:
                    weight_type = '''weight'''
                elif "running_mean" in name:
                    weight_type = '''running_mean'''
                elif "running_var" in name:
                    weight_type = '''running_var'''
                elif "num_batches_tracked" in name:
                    weight_type = '''num_batches_tracked'''
                else:
                    weight_type = None
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            continue  # vestigial no-op kept from the original loop shape
        if not is_used:
            unused_weights.append(name )
    logger.warning(F"Unused weights: {unused_weights}" )
@torch.no_grad()
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case=None , __snake_case=None , ) -> Union[str, Any]:
    """Build an EncodecModel/feature extractor for ``model_name``, load the
    original checkpoint weights into it, save it, and optionally push to hub.

    NOTE(review): this copy looks mangled -- the parameters are all named
    ``__snake_case`` (duplicate parameter names are a SyntaxError), and the
    per-variant ``_lowercase = ...`` lines below appear to have lost their
    targets (presumably ``config.<attribute> = ...``, e.g. upsampling ratios,
    target bandwidths, sampling rate). Confirm against the upstream
    conversion script before running.
    """
    if config_path is not None:
        _lowercase =EncodecConfig.from_pretrained(__snake_case )
    else:
        _lowercase =EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        # 32 kHz variant: custom ratios/bandwidth/filters, causal conv disabled.
        _lowercase =[8, 5, 4, 4]
        _lowercase =[2.2]
        _lowercase =64
        _lowercase =32000
        _lowercase =2048
        _lowercase =False
        _lowercase =False
        _lowercase =False
    elif model_name == "encodec_48khz":
        # 48 kHz stereo variant: group-norm, normalisation and chunking enabled.
        _lowercase =[8, 5, 4, 2]
        _lowercase =[3.0, 6.0, 12.0, 24.0]
        _lowercase =48000
        _lowercase =2
        _lowercase =False
        _lowercase ='''time_group_norm'''
        _lowercase =True
        _lowercase =1.0
        _lowercase =0.01
    else:
        raise ValueError(F"Unknown model name: {model_name}" )
    _lowercase =EncodecModel(__snake_case )
    _lowercase =EncodecFeatureExtractor(
        feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
    feature_extractor.save_pretrained(__snake_case )
    _lowercase =torch.load(__snake_case )
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        _lowercase =original_checkpoint['''best_state''']
    recursively_load_weights(__snake_case , __snake_case , __snake_case )
    model.save_pretrained(__snake_case )
    if repo_id:
        print('''Pushing to the hub...''' )
        feature_extractor.push_to_hub(__snake_case )
        model.push_to_hub(__snake_case )
if __name__ == "__main__":
    # Command-line entry point: parse arguments and run the conversion.
    UpperCAmelCase__ = argparse.ArgumentParser()
    parser.add_argument(
        '''--model''',
        default='''encodec_24khz''',
        type=str,
        help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
    )
    parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
    )
    # NOTE(review): the parser/args are assigned to the mangled name
    # ``UpperCAmelCase__`` while the lines below read ``parser``/``args`` --
    # restore the original names before running.
    UpperCAmelCase__ = parser.parse_args()
    convert_checkpoint(
        args.model,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
| 5 |
from math import isqrt
def UpperCAmelCase_ ( __snake_case ) -> list[int]:
    """Return all primes strictly below ``__snake_case`` (sieve of Eratosthenes).

    Fixes vs. the mangled original: the inner ``range`` used ``max_number`` as
    its step (so only ``i*i`` was ever visited) and the ``False`` write had
    lost its ``is_prime[j]`` target -- the sieve marked almost nothing.
    Also returns ``[]`` for inputs below 2 instead of raising.
    """
    if __snake_case < 2:
        return []
    is_prime = [True] * __snake_case
    # only need to sieve with factors up to sqrt(max_number - 1)
    for i in range(2 , isqrt(__snake_case - 1 ) + 1 ):
        if is_prime[i]:
            # strike out every multiple of i, starting at i*i, stepping by i
            for j in range(i**2 , __snake_case , i ):
                is_prime[j] = False
    return [i for i in range(2 , __snake_case ) if is_prime[i]]
def UpperCAmelCase_ ( __snake_case = 10**8 ) -> int:
    """Count the numbers below ``__snake_case`` that are the product of exactly
    two (not necessarily distinct) primes, via a two-pointer sweep over the
    primes up to ``__snake_case // 2``."""
    primes = calculate_prime_numbers(__snake_case // 2 )
    total = 0
    lo = 0
    hi = len(primes ) - 1
    while lo <= hi:
        # shrink the upper pointer until primes[lo] * primes[hi] fits the limit
        while primes[lo] * primes[hi] >= __snake_case:
            hi -= 1
        # every prime in primes[lo..hi] pairs with primes[lo]
        total += hi - lo + 1
        lo += 1
    return total
if __name__ == "__main__":
    # Print the Project-Euler-style answer for the default limit.
    print(f'''{solution() = }''')
| 5 | 1 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase):
    """Slow integration test for FlaxStableDiffusionInpaintPipeline: runs the
    pretrained SD2 inpainting checkpoint across all devices and pins a slice
    of the output image.

    NOTE(review): this copy looks mangled -- locals are assigned to
    ``_lowercase`` while later lines read ``pipeline``/``output``/``images``
    etc., which are never bound; confirm against the upstream test file.
    """
    def __A (self ) -> List[Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def __A (self ) -> Optional[Any]:
        # Fetch the reference init image and mask from the hub.
        _lowercase =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''' )
        _lowercase =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
        _lowercase ='''xvjiarui/stable-diffusion-2-inpainting'''
        _lowercase , _lowercase =FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCAmelCase , safety_checker=UpperCAmelCase )
        _lowercase ='''Face of a yellow cat, high resolution, sitting on a park bench'''
        _lowercase =jax.random.PRNGKey(0 )
        _lowercase =5_0
        # Replicate the inputs once per device for pmap-style execution.
        _lowercase =jax.device_count()
        _lowercase =num_samples * [prompt]
        _lowercase =num_samples * [init_image]
        _lowercase =num_samples * [mask_image]
        _lowercase , _lowercase , _lowercase =pipeline.prepare_inputs(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
        # shard inputs and rng
        _lowercase =replicate(UpperCAmelCase )
        _lowercase =jax.random.split(UpperCAmelCase , jax.device_count() )
        _lowercase =shard(UpperCAmelCase )
        _lowercase =shard(UpperCAmelCase )
        _lowercase =shard(UpperCAmelCase )
        _lowercase =pipeline(
            UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase )
        # Compare a fixed 3x3x1 slice of the first image to pinned values.
        _lowercase =output.images.reshape(UpperCAmelCase , 5_1_2 , 5_1_2 , 3 )
        _lowercase =images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        _lowercase =jnp.asarray(jax.device_get(image_slice.flatten() ) )
        _lowercase =jnp.array(
            [0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
        print(f"output_slice: {output_slice}" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 5 |
# Character -> International Morse code table (letters, digits, punctuation).
# NOTE(review): assigned to the mangled name ``UpperCAmelCase__`` while the
# reverse table below and the codec functions read ``MORSE_CODE_DICT`` --
# restore the original name before use.
UpperCAmelCase__ = {
    '''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
    '''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
    '''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
    '''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
    '''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
    '''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
    ''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
    '''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
    '''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
# Morse code -> character table, derived by inverting the table above.
UpperCAmelCase__ = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCAmelCase_ ( message ) -> str:
    """Encode ``message`` as Morse code, one code per character, separated by
    single spaces; input is upper-cased first.

    Fix vs. the mangled original: the parameter was named ``__snake_case``
    while the body reads ``message`` -- restoring the name removes the
    NameError. Raises KeyError for characters without a Morse encoding.
    """
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def UpperCAmelCase_ ( message ) -> str:
    """Decode space-separated Morse ``message`` back into plain characters.

    Fix vs. the mangled original: the parameter was named ``__snake_case``
    while the body reads ``message`` -- restoring the name removes the
    NameError. Raises KeyError for unknown Morse sequences.
    """
    return "".join(REVERSE_DICT[char] for char in message.split() )
def UpperCAmelCase_ ( ) -> None:
    """Round-trip demo: print a message, its Morse encoding, and the decoding.

    Fix vs. the mangled original: intermediate results were assigned to
    ``_lowercase`` while the ``print`` calls read an undefined name; a single
    local ``message`` is now threaded through the pipeline.
    """
    message = '''Morse code here!'''
    print(message )
    message = encrypt(message )
    print(message )
    message = decrypt(message )
    print(message )
if __name__ == "__main__":
    # NOTE(review): calls ``main`` but the demo above is defined under a
    # mangled name (``UpperCAmelCase_``) -- confirm the intended entry point.
    main()
| 5 | 1 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 5 |
from typing import Any
def UpperCAmelCase_ ( observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ) -> list:
    """Viterbi algorithm: return the most likely hidden-state sequence for the
    observation sequence under the given HMM parameters.

    Fixes vs. the mangled original: the five parameters were all named
    ``__snake_case`` (a SyntaxError) -- restored from the names the body
    already reads -- and the ``probabilities[...]`` / ``pointers[...]`` writes
    had lost their subscript targets, which are reconstructed from the later
    reads (``probabilities[(k_state, prior_observation)]`` and
    ``pointers[previous, observations_space[o]]``).

    Raises ValueError (via ``_validation``) on malformed inputs.
    """
    _validation(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    # Creates data structures and fill initial step
    probabilities = {}
    pointers = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1 , len(observations_space ) ):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ''''''
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space ) - 1]
    # argmax for given final observation
    arg_max = ''''''
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space ) - 1 , -1 , -1 ):
        result.append(previous )
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def UpperCAmelCase_ ( observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ) -> None:
    """Validate all Viterbi inputs, raising ValueError on the first problem.

    Fix vs. the mangled original: the five parameters were all named
    ``__snake_case`` (a SyntaxError); names are restored to match the helper
    validators they are forwarded to.
    """
    _validate_not_empty(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    _validate_lists(observations_space , states_space )
    _validate_dicts(
        initial_probabilities , transition_probabilities , emission_probabilities )
def UpperCAmelCase_ ( observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ) -> None:
    """Raise ValueError when any of the five inputs is empty/falsy.

    Fix vs. the mangled original: the five parameters were all named
    ``__snake_case`` (a SyntaxError); the body already lists the intended
    names, which are restored.
    """
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError('''There\'s an empty parameter''' )
def UpperCAmelCase_ ( observations_space , states_space ) -> None:
    """Validate that both sequence inputs are lists of strings.

    Fix vs. the mangled original: both parameters were named ``__snake_case``
    (a SyntaxError); names restored to match the variable-name strings passed
    to the helper.
    """
    _validate_list(observations_space , '''observations_space''' )
    _validate_list(states_space , '''states_space''' )
def UpperCAmelCase_ ( _object , var_name ) -> None:
    """Raise ValueError unless ``_object`` is a list of strings; ``var_name``
    is used in the error message.

    Fixes vs. the mangled original: the parameters were both named
    ``__snake_case`` (a SyntaxError) while the body reads ``_object`` and
    ``var_name``, and the ``isinstance`` type arguments had been replaced by
    ``__snake_case`` -- they are restored to ``list``/``str`` as the error
    messages state.
    """
    if not isinstance(_object , list ):
        msg = F"{var_name} must be a list"
        raise ValueError(msg )
    else:
        for x in _object:
            if not isinstance(x , str ):
                msg = F"{var_name} must be a list of strings"
                raise ValueError(msg )
def UpperCAmelCase_ ( initial_probabilities , transition_probabilities , emission_probabilities , ) -> None:
    """Validate the three probability tables: a flat str->float dict for the
    initial probabilities and nested dicts for transition/emission.

    Fix vs. the mangled original: the three parameters were all named
    ``__snake_case`` (a SyntaxError); names restored from the variable-name
    strings already present in the calls. The flat dict's value type is
    ``float`` (matching what ``_validate_dict`` checks for nested values).
    """
    _validate_dict(initial_probabilities , '''initial_probabilities''' , float )
    _validate_nested_dict(transition_probabilities , '''transition_probabilities''' )
    _validate_nested_dict(emission_probabilities , '''emission_probabilities''' )
def UpperCAmelCase_ ( _object , var_name ) -> None:
    """Validate a dict-of-dicts: outer values must themselves be dicts whose
    values are floats.

    Fix vs. the mangled original: the parameters were both named
    ``__snake_case`` (a SyntaxError) and the type arguments forwarded to
    ``_validate_dict`` had been replaced -- restored to ``dict`` for the outer
    level and ``float`` (with ``nested=True``) for the inner level.
    """
    _validate_dict(_object , var_name , dict )
    for x in _object.values():
        _validate_dict(x , var_name , float , True )
def UpperCAmelCase_ ( _object , var_name , value_type , nested = False ) -> None:
    """Raise ValueError unless ``_object`` is a dict with string keys and
    ``value_type`` values; ``nested`` only adjusts the error wording.

    Fix vs. the mangled original: the parameters were all named
    ``__snake_case`` (a SyntaxError) while the body reads ``_object``,
    ``var_name``, ``value_type`` and ``nested``; the ``isinstance`` type
    arguments are restored to ``dict``/``str``/``value_type`` per the error
    messages.
    """
    if not isinstance(_object , dict ):
        msg = F"{var_name} must be a dict"
        raise ValueError(msg )
    if not all(isinstance(x , str ) for x in _object ):
        msg = F"{var_name} all keys must be strings"
        raise ValueError(msg )
    if not all(isinstance(x , value_type ) for x in _object.values() ):
        nested_text = '''nested dictionary ''' if nested else ''''''
        msg = F"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    from doctest import testmod
    testmod()
| 5 | 1 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowerCamelCase__ ( nn.Module):
    """Dual Transformer2D block: two parallel TransformeraDModel instances whose
    outputs are mixed by ``mix_ratio`` in the forward pass, each transformer
    consuming a different slice of the conditioning tokens.

    NOTE(review): this copy looks mangled -- the ``_lowercase = ...`` lines in
    ``__init__`` appear to have lost their ``self.`` targets (the forward pass
    reads ``self.transformers``, ``self.mix_ratio``, ``self.condition_lengths``
    and ``self.transformer_index_for_condition``), and the ``__init__``
    signature reuses the parameter name ``UpperCAmelCase`` (a SyntaxError).
    Confirm against the upstream module before use.
    """
    def __init__(self , UpperCAmelCase = 1_6 , UpperCAmelCase = 8_8 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = 3_2 , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = "geglu" , UpperCAmelCase = None , ) -> Any:
        super().__init__()
        # Two identically-configured transformers run in parallel.
        _lowercase =nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=UpperCAmelCase , attention_head_dim=UpperCAmelCase , in_channels=UpperCAmelCase , num_layers=UpperCAmelCase , dropout=UpperCAmelCase , norm_num_groups=UpperCAmelCase , cross_attention_dim=UpperCAmelCase , attention_bias=UpperCAmelCase , sample_size=UpperCAmelCase , num_vector_embeds=UpperCAmelCase , activation_fn=UpperCAmelCase , num_embeds_ada_norm=UpperCAmelCase , )
                for _ in range(2 )
            ] )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        _lowercase =0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        _lowercase =[7_7, 2_5_7]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        _lowercase =[1, 0]
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase = True , ) -> str:
        _lowercase =hidden_states
        _lowercase =[]
        _lowercase =0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            _lowercase =encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            _lowercase =self.transformer_index_for_condition[i]
            _lowercase =self.transformers[transformer_index](
                UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , timestep=UpperCAmelCase , cross_attention_kwargs=UpperCAmelCase , return_dict=UpperCAmelCase , )[0]
            # keep the residual so the two branches can be mixed below
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]
        # blend the two branch outputs and re-add the input residual
        _lowercase =encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        _lowercase =output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=UpperCAmelCase )
| 5 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for the ESM configuration module.
UpperCAmelCase__ = logging.get_logger(__name__)
# TODO Update this
# Map of released ESM checkpoints to their hosted config files.
UpperCAmelCase__ = {
    '''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class lowerCamelCase__ ( lowerCAmelCase):
    """Configuration class for ESM models (model type ``"esm"``), including the
    optional ESMFold folding-model settings.

    NOTE(review): this copy looks mangled -- ``__init__`` reuses the parameter
    name ``UpperCAmelCase`` for every argument (a SyntaxError) and the
    ``_lowercase = ...`` lines appear to have lost their ``self.`` targets
    (``to_dict`` and the checks below read ``self.esmfold_config`` etc.).
    Confirm against the upstream configuration class before use.
    """
    SCREAMING_SNAKE_CASE__ = '''esm'''
    def __init__(self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=7_6_8 , UpperCAmelCase=1_2 , UpperCAmelCase=1_2 , UpperCAmelCase=3_0_7_2 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=1_0_2_6 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-12 , UpperCAmelCase="absolute" , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase , ) -> Tuple:
        super().__init__(pad_token_id=UpperCAmelCase , mask_token_id=UpperCAmelCase , **UpperCAmelCase )
        # Core transformer hyper-parameters.
        _lowercase =vocab_size
        _lowercase =hidden_size
        _lowercase =num_hidden_layers
        _lowercase =num_attention_heads
        _lowercase =intermediate_size
        _lowercase =hidden_dropout_prob
        _lowercase =attention_probs_dropout_prob
        _lowercase =max_position_embeddings
        _lowercase =initializer_range
        _lowercase =layer_norm_eps
        _lowercase =position_embedding_type
        _lowercase =use_cache
        _lowercase =emb_layer_norm_before
        _lowercase =token_dropout
        _lowercase =is_folding_model
        if is_folding_model:
            # Folding models need an EsmFoldConfig and an explicit vocab list.
            if esmfold_config is None:
                logger.info('''No esmfold_config supplied for folding model, using default values.''' )
                _lowercase =EsmFoldConfig()
            elif isinstance(UpperCAmelCase , UpperCAmelCase ):
                _lowercase =EsmFoldConfig(**UpperCAmelCase )
            _lowercase =esmfold_config
            if vocab_list is None:
                logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
                _lowercase =get_default_vocab_list()
            else:
                _lowercase =vocab_list
        else:
            _lowercase =None
            _lowercase =None
        if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCAmelCase ):
            raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
    def __A (self ) -> List[str]:
        # Serialise the config, expanding the nested EsmFoldConfig when present.
        _lowercase =super().to_dict()
        if isinstance(self.esmfold_config , UpperCAmelCase ):
            _lowercase =self.esmfold_config.to_dict()
        return output
@dataclass
class lowerCamelCase__ :
    # NOTE(review): this file looks machine-scrambled — every attribute below
    # was renamed to the same identifier, so only the LAST assignment (None)
    # survives as a class attribute, and @dataclass sees no annotated fields
    # (the generated __init__ takes no arguments). Restore the distinct field
    # names from the original source before relying on this class.
    SCREAMING_SNAKE_CASE__ = None
    SCREAMING_SNAKE_CASE__ = True
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = 0
    SCREAMING_SNAKE_CASE__ = True
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = 128
    SCREAMING_SNAKE_CASE__ = None
    # NOTE(review): both methods below are named __A, so the second definition
    # replaces the first in the class namespace.
    def __A (self ) -> Union[str, Any]:
        # Post-init-style hook: default-construct or coerce the trunk sub-config.
        # `_lowercase` was presumably `self.trunk`; as written the result is discarded.
        if self.trunk is None:
            _lowercase =TrunkConfig()
        elif isinstance(self.trunk , UpperCAmelCase ):
            _lowercase =TrunkConfig(**self.trunk )
    def __A (self ) -> Tuple:
        # to_dict-style serialization; `output` is read but never bound here
        # (scrambling artifact) — `_lowercase` was presumably `output`.
        _lowercase =asdict(self )
        _lowercase =self.trunk.to_dict()
        return output
@dataclass
class lowerCamelCase__ :
    # NOTE(review): as above, all attribute names were collapsed onto one
    # identifier by the scrambler; only the final None survives, and
    # @dataclass sees no annotated fields.
    SCREAMING_SNAKE_CASE__ = 48
    SCREAMING_SNAKE_CASE__ = 1024
    SCREAMING_SNAKE_CASE__ = 128
    SCREAMING_SNAKE_CASE__ = 32
    SCREAMING_SNAKE_CASE__ = 32
    SCREAMING_SNAKE_CASE__ = 32
    SCREAMING_SNAKE_CASE__ = 0
    SCREAMING_SNAKE_CASE__ = 0
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = 4
    SCREAMING_SNAKE_CASE__ = 128
    SCREAMING_SNAKE_CASE__ = None
    def __A (self ) -> List[str]:
        # Validation hook for the trunk hyper-parameters.
        if self.structure_module is None:
            _lowercase =StructureModuleConfig()
        elif isinstance(self.structure_module , UpperCAmelCase ):
            _lowercase =StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}." )
        # NOTE(review): x % x is always 0, so the next two checks can never
        # fire — the divisor was presumably meant to be the head width.
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got'''
                f" {self.sequence_state_dim} and {self.sequence_state_dim}." )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got'''
                f" {self.pairwise_state_dim} and {self.pairwise_state_dim}." )
        # NOTE(review): these two results are read below as `sequence_num_heads`
        # / `pairwise_num_heads` — another scrambling artifact.
        _lowercase =self.sequence_state_dim // self.sequence_head_width
        _lowercase =self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}." )
    def __A (self ) -> Dict:
        # to_dict-style serialization; `output` is never bound (scrambling artifact).
        _lowercase =asdict(self )
        _lowercase =self.structure_module.to_dict()
        return output
@dataclass
class lowerCamelCase__ :
    # NOTE(review): structure-module-style hyper-parameter container; all
    # attribute names were collapsed onto one identifier by the scrambler, so
    # only the last value (1E5) survives and @dataclass sees no fields.
    SCREAMING_SNAKE_CASE__ = 384
    SCREAMING_SNAKE_CASE__ = 128
    SCREAMING_SNAKE_CASE__ = 16
    SCREAMING_SNAKE_CASE__ = 128
    SCREAMING_SNAKE_CASE__ = 12
    SCREAMING_SNAKE_CASE__ = 4
    SCREAMING_SNAKE_CASE__ = 8
    SCREAMING_SNAKE_CASE__ = 0.1
    SCREAMING_SNAKE_CASE__ = 8
    SCREAMING_SNAKE_CASE__ = 1
    SCREAMING_SNAKE_CASE__ = 2
    SCREAMING_SNAKE_CASE__ = 7
    SCREAMING_SNAKE_CASE__ = 10
    SCREAMING_SNAKE_CASE__ = 1E-8
    SCREAMING_SNAKE_CASE__ = 1E5
    def __A (self ) -> List[Any]:
        # Serialize the (field-less, see note above) dataclass to a dict.
        return asdict(self )
def UpperCAmelCase_ ( ) -> tuple:
    """Return the default ESM-2 token vocabulary (33 entries).

    The order is significant: 4 special tokens, 25 amino-acid / ambiguity
    codes, gap characters, then ``<null_1>`` and ``<mask>``.

    Fix: the original annotated the return as ``Tuple``, a name this file
    never imports (evaluated at ``def`` time it would raise NameError);
    the builtin ``tuple`` is used instead.
    """
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
| 5 | 1 |
from typing import Any
import numpy as np
def UpperCAmelCase_(matrix) -> bool:
    """Return True when *matrix* is Hermitian (equal to its conjugate transpose).

    Fix: the original body read an undefined name ``matrix`` while the
    parameter was called ``__snake_case``; the parameter is renamed so the
    body actually uses the argument.
    """
    return np.array_equal(matrix, matrix.conjugate().T)
def UpperCAmelCase_(a, v):
    """Return the Rayleigh quotient (v* A v) / (v* v) of matrix *a* and column vector *v*.

    Fix: the original declared two parameters with the SAME name (a
    SyntaxError in Python) and its body read undefined names ``v`` /
    ``v_star``; distinct parameter names are restored from the body's own
    references.
    """
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    # Sanity check that we are operating on numpy arrays, as the original did.
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
def UpperCAmelCase_ ( ) -> None:
    """simple docstring"""
    # NOTE(review): this self-test calls `is_hermitian` and `rayleigh_quotient`,
    # neither of which is defined under those names in this scrambled file
    # (both helpers above were renamed to UpperCAmelCase_). The locals were
    # also collapsed onto `_lowercase`, so the `__snake_case` arguments and
    # the `{a}` f-string references are dangling. Restore from the original
    # source before running.
    _lowercase =np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    _lowercase =np.array([[1], [2], [3]] )
    assert is_hermitian(__snake_case ), F"{a} is not hermitian."
    print(rayleigh_quotient(__snake_case , __snake_case ) )
    _lowercase =np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(__snake_case ), F"{a} is not hermitian."
    assert rayleigh_quotient(__snake_case , __snake_case ) == float(3 )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `tests` is not defined in this file — the test routine
    # above was renamed to UpperCAmelCase_ by the scrambler.
    tests()
| 5 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
UpperCAmelCase__ = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
class lowerCamelCase__ ( lowerCAmelCase):
    """Iterable dataset of tokenized HumanEval prompts, each yielded ``n_copies`` times.

    Fix (review): the scrambled original repeated one parameter name (a
    SyntaxError) and bound the arguments to throwaway locals, while
    ``__iter__`` reads ``self.tokenizer`` / ``self.dataset`` /
    ``self.n_tasks`` / ``self.n_copies`` — the attribute assignments are
    restored here.
    """

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        # Default to evaluating every task in the dataset.
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors='''pt''')
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class lowerCamelCase__ ( lowerCAmelCase):
    """Stopping criterion: halt generation once every sequence contains an EOF marker.

    Fix (review): the scrambled original repeated one parameter name (a
    SyntaxError) and bound the arguments to throwaway locals, while
    ``__call__`` reads ``self.tokenizer`` / ``self.start_length`` /
    ``self.eof_strings`` — the attribute assignments are restored here.
    """

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        # Only look at the newly generated part, after the prompt.
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        # Stop only when every sequence in the batch has hit a marker.
        return all(done)
def UpperCAmelCase_(string):
    """Truncate generated code at the last end-of-function marker.

    Fix (review): the original joined the *input string's characters* into the
    split pattern (a scrambling artifact); the intended pattern is the module
    EOF-marker list defined above (bound to ``UpperCAmelCase__``). The pattern
    keeps each separator via a capturing group, so dropping the final two
    pieces removes the trailing marker and whatever followed it.
    """
    string_list = re.split("(%s)" % "|".join(UpperCAmelCase__), string)
    # last string should be ""
    return "".join(string_list[:-2])
def UpperCAmelCase_(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate ``batch_size`` completions per HumanEval task and group them by task id.

    Fix (review): the scrambled original repeated one parameter name (a
    SyntaxError) and collapsed its locals onto ``_lowercase``; parameter and
    local names are recovered from the surviving references. The
    ``start_length`` update is reconstructed from the original project —
    confirm against the unscrambled source.
    """
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            # Tell the stopping criterion where the prompt ends for this batch.
            gen_kwargs["stopping_criteria"][0].start_length = batch['''ids'''].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch['''ids'''][:, : batch['''input_len''']], num_return_sequences=batch_size, **gen_kwargs)
            # each task is generated batch_size times
            generated_tasks = batch['''task_id'''].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id)
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            # Cut each completion at its first end-of-function marker.
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def UpperCAmelCase_ ( ) -> None:
    """Run HumanEval: generate completions with a causal LM and score them with ``code_eval``.

    Fix (review): the scrambled original collapsed every local (and two
    ``os.environ`` writes) onto ``_lowercase``; the names are recovered from
    the surviving references and the original comments. ``TokenizedDataset``,
    ``EndOfFunctionCriteria``, ``complete_code`` and ``main`` are kept as the
    original referenced them, even though the scrambler renamed those
    definitions — restore the definition names to match before running.
    """
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = '''false'''
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
    # Generation settings
    gen_kwargs = {
        '''do_sample''': args.do_sample,
        '''temperature''': args.temperature,
        '''max_new_tokens''': args.max_new_tokens,
        '''top_p''': args.top_p,
        '''top_k''': args.top_k,
        '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0, UpperCAmelCase__, tokenizer)]),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset('''openai_humaneval''')
    code_eval_metric = load_metric('''code_eval''')
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''])
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval['''test'''], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[''''''], predictions=[['''''']])
    except ValueError as exception:
        print(
            '''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
            ''' flag to enable code evaluation.''')
        raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)
    code_gens = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs)
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval['''test'''][task]['''test''']
            entry_point = F"check({human_eval['test'][task]['entry_point']})"
            references.append('''\n''' + test_func + '''\n''' + entry_point)
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers)
        print(F"Results: {pass_at_k}")
        # Save results to json file
        with open(args.output_file, '''w''') as fp:
            json.dump(pass_at_k, fp)
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this scrambled file — the entry
    # point above was renamed to UpperCAmelCase_.
    main()
| 5 | 1 |
from collections import defaultdict
from math import ceil, sqrt
def UpperCAmelCase_(t_limit=1000000, unique_limit=10):
    """Project Euler 174: count tile totals t <= ``t_limit`` that form a hollow
    square lamina in between 1 and ``unique_limit`` distinct ways.

    Fixes: the original declared both parameters with the SAME name (a
    SyntaxError in Python), and hard-coded ``10`` in the final sum so the
    second parameter was ignored; it is now honoured (default unchanged).
    """
    count = defaultdict(int)
    # A lamina with outer side `outer_width` and hole side `hole_width`
    # (same parity, hole_width <= outer_width - 2) uses
    # outer_width**2 - hole_width**2 tiles.
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            # Smallest hole that keeps the tile count within t_limit.
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # Hole and outer square must share parity.
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= unique_limit)
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this scrambled file — the
    # solver above was renamed to UpperCAmelCase_.
    print(f'''{solution() = }''')
| 5 |
# Molar (universal) gas constant. Unit - J mol-1 K-1
UNIVERSAL_GAS_CONSTANT = 8.31_44_62
# Scrambled alias kept so any existing reference to the old name still works.
UpperCAmelCase__ = UNIVERSAL_GAS_CONSTANT


def UpperCAmelCase_(moles, kelvin, volume) -> float:
    """Ideal-gas pressure, P = nRT / V.

    Fixes: the original declared all three parameters with the SAME name (a
    SyntaxError) while the body read ``moles`` / ``kelvin`` / ``volume``, and
    ``UNIVERSAL_GAS_CONSTANT`` was never defined (the constant above had been
    renamed); both are restored. Raises ValueError on negative input.
    """
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError('''Invalid inputs. Enter positive value.''')
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def UpperCAmelCase_(moles, kelvin, pressure) -> float:  # noqa: F811 — shadows the pressure helper, as in the original
    """Ideal-gas volume, V = nRT / P.

    Same fixes as above. NOTE(review): both helpers share the scrambled name
    ``UpperCAmelCase_``, so at module level this definition shadows the
    pressure one — restore distinct names from the original source.
    """
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError('''Invalid inputs. Enter positive value.''')
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
    # Run the module doctests (none survive in this scrambled copy).
    from doctest import testmod
    testmod()
| 5 | 1 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def UpperCAmelCase_(argv=None):
    """Parse the Stable-Diffusion demo's command-line arguments.

    Fixes (review): the original body passed an undefined ``__snake_case`` as
    every ``type`` / ``default`` / ``required`` value (the function takes no
    parameter of that name); the values are reconstructed from the remaining
    defaults — confirm against the unscrambled source. Also generalized:
    ``argv`` may be supplied explicitly (defaults to ``sys.argv`` as before).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--pretrained_model_name_or_path", type=str, default=None, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.")
    parser.add_argument(
        "-c", "--caption", type=str, default="robotic cat with wings",
        help="Text used to generate images.")
    parser.add_argument(
        "-n", "--images_num", type=int, default=4,
        help="How much images to generate.")
    parser.add_argument(
        "-s", "--seed", type=int, default=42,
        help="Seed for random process.")
    parser.add_argument(
        "-ci", "--cuda_id", type=int, default=0,
        help="cuda_id.")
    args = parser.parse_args(argv)
    return args
def UpperCAmelCase_(imgs, rows, cols):
    """Paste ``rows * cols`` PIL images into a single grid image.

    Fix: the original declared all three parameters with the SAME name (a
    SyntaxError in Python) while the body read ``rows`` / ``cols``; distinct
    names are restored from the body's own references.

    Raises ValueError when the image count does not match ``rows * cols``.
    """
    if not len(imgs) == rows * cols:
        raise ValueError('''The specified number of rows and columns are not correct.''')
    w, h = imgs[0].size
    grid = Image.new('''RGB''', size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        # Fill the grid row by row, left to right.
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def UpperCAmelCase_(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
    """Run the diffusion pipeline and return ``(grid_image, images)``.

    Fix (review): the original declared several parameters with the SAME name
    (a SyntaxError); names and order are reconstructed from the surviving
    keyword arguments — confirm against the unscrambled source.
    ``image_grid`` is referenced as in the original even though this file
    defines it under a scrambled name.
    """
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps,
        generator=generator, num_images_per_prompt=num_images_per_prompt, ).images
    # Arrange the batch into the most-square grid possible.
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
# NOTE(review): module-level driver. The scrambler collapsed every binding
# onto `UpperCAmelCase__` while later lines still read the original names
# (args, text_encoder, vae, unet, tokenizer, pipeline, grid, images, dirname)
# and call `parse_args` / `generate_images`, which this file defines under
# scrambled names. Restore the original bindings before running.
UpperCAmelCase__ = parse_args()
# Load models and create wrapper for stable diffusion
UpperCAmelCase__ = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
UpperCAmelCase__ = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
UpperCAmelCase__ = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
UpperCAmelCase__ = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
UpperCAmelCase__ = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker (presumably assigned to pipeline.safety_checker
# in the original — the target was lost in scrambling).
UpperCAmelCase__ = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
    # Load the Intel Neural Compressor quantized/fine-tuned UNet if present.
    UpperCAmelCase__ = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, '''unet''', unet)
else:
    UpperCAmelCase__ = unet.to(torch.device('''cuda''', args.cuda_id))
UpperCAmelCase__ = pipeline.to(unet.device)
UpperCAmelCase__ ,UpperCAmelCase__ = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
# Save the combined grid, then each individual image, named after the caption.
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
UpperCAmelCase__ = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
| 5 |
from __future__ import annotations
from collections.abc import Callable
UpperCAmelCase__ = list[list[float | int]]
def UpperCAmelCase_(matrix, vector):
    """Solve ``matrix @ x = vector`` by Gaussian elimination with partial pivoting.

    Fix: the original declared both parameters with the SAME name (a
    SyntaxError in Python) while the body read ``matrix`` / ``vector``;
    the names are restored from the body's own references.

    Returns x as a column (list of 1-element rows), each entry rounded to
    10 decimal places.
    """
    size = len(matrix)
    # Build the augmented matrix [matrix | vector].
    augmented = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting: pick the row with the largest magnitude in this column.
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        # Eliminate this column from every row below the pivot.
        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def UpperCAmelCase_(data_points) -> Callable[[int], int]:
    """Fit a polynomial through ``(1, y1), (2, y2), ...`` and return it as a callable.

    Fix (review): the scrambled original discarded the matrix/vector writes
    into a throwaway local while later lines read ``matrix`` / ``vector`` /
    ``coeffs``; the bindings are restored from those references.
    NOTE(review): relies on a sibling Gaussian-elimination helper referenced
    as ``solve``, which this scrambled file never defines under that name.
    """
    size = len(data_points)
    # Vandermonde system: row i encodes (i+1)^(size-1) ... (i+1)^0.
    matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector = [[0] for _ in range(size)]
    for x_val, y_val in enumerate(data_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector)

    def interpolated_func(var) -> int:
        # Evaluate the fitted polynomial with integer-rounded coefficients.
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size))

    return interpolated_func
def UpperCAmelCase_(variable) -> int:
    """Project Euler 101 generating polynomial u(n) = 1 - n + n^2 - ... + n^10.

    Fix: the original body read an undefined name ``variable`` while the
    parameter was called ``__snake_case``; the parameter is renamed so the
    body actually uses the argument.
    """
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def UpperCAmelCase_(func=question_function, order=10) -> int:
    """Project Euler 101: sum the first incorrect term (FIT) of each optimal polynomial.

    Fix: the original declared both parameters with the SAME name (a
    SyntaxError in Python) and read ``func`` / ``order`` in the body; the
    distinct names are restored. NOTE(review): ``question_function`` and
    ``interpolate`` are referenced exactly as the original did, but this
    scrambled file does not define them under those names — restore the
    sibling definitions' names to match before running.
    """
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    # OP(k, n): polynomial of degree k-1 fitted to the first k data points.
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret = 0
    for poly in polynomials:
        # Walk forward until the fit first disagrees with the true sequence.
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this scrambled file — the
    # solver above was renamed to UpperCAmelCase_.
    print(f'''{solution() = }''')
| 5 | 1 |
# flake8: noqa
# Lint as: python3
# Public names re-exported by this utils package (an __all__-style list;
# the scrambler renamed the binding, so it no longer acts as __all__).
UpperCAmelCase__ = [
    '''VerificationMode''',
    '''Version''',
    '''disable_progress_bar''',
    '''enable_progress_bar''',
    '''is_progress_bar_enabled''',
    '''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 5 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import scaffolding: maps each submodule to the public names it provides.
# NOTE(review): the scrambler bound every value to `UpperCAmelCase__` (each
# assignment clobbers the previous one) while `_import_structure` at the
# bottom is never defined — restore the original bindings before running.
UpperCAmelCase__ = {
    '''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
    '''tokenization_xlm''': ['''XLMTokenizer'''],
}
# PyTorch model classes are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase__ = [
        '''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XLMForMultipleChoice''',
        '''XLMForQuestionAnswering''',
        '''XLMForQuestionAnsweringSimple''',
        '''XLMForSequenceClassification''',
        '''XLMForTokenClassification''',
        '''XLMModel''',
        '''XLMPreTrainedModel''',
        '''XLMWithLMHeadModel''',
    ]
# TensorFlow model classes are only registered when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase__ = [
        '''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFXLMForMultipleChoice''',
        '''TFXLMForQuestionAnsweringSimple''',
        '''TFXLMForSequenceClassification''',
        '''TFXLMForTokenClassification''',
        '''TFXLMMainLayer''',
        '''TFXLMModel''',
        '''TFXLMPreTrainedModel''',
        '''TFXLMWithLMHeadModel''',
    ]
# Static type checkers see the real imports; at runtime a _LazyModule defers them.
if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )
else:
    import sys
    UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
UpperCAmelCase__ = logging.get_logger(__name__)
# NOTE(review): the next assignment reuses the same scrambled name and
# clobbers the logger — the map was presumably bound to a distinct name.
UpperCAmelCase__ = {
    '''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class lowerCamelCase__ ( lowerCAmelCase):
    """Configuration for a Perceiver-style model (``model_type`` is ``perceiver``).

    Fix (review): the scrambled original repeated one parameter name (a
    SyntaxError) and bound every argument to a throwaway local; the 25
    parameter names below are recovered from the 25 attribute assignments,
    whose order matches the original defaults one-to-one.
    """

    SCREAMING_SNAKE_CASE__ = '''perceiver'''

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],  # NOTE(review): mutable default kept for compatibility
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],  # NOTE(review): mutable default kept for compatibility
        **kwargs,
    ):
        """Store all hyper-parameters and forward extra kwargs to the base config."""
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class lowerCamelCase__ ( lowerCAmelCase):
    """ONNX export configuration for Perceiver-style models.

    Fix (review): in the scrambled original all three members were named
    ``__A`` (so each definition clobbered the previous one),
    ``generate_dummy_inputs`` repeated one parameter name (a SyntaxError),
    and the ``inputs`` property read an undefined ``dynamic_axis``. The
    standard ``OnnxConfig`` member names and the local bindings are restored.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Axis 1 is the choice axis only for multiple-choice tasks.
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''inputs''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ])

    @property
    def atol_for_validation(self) -> float:
        # Tolerance used when validating ONNX outputs against the framework.
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size=-1,
        seq_length=-1,
        num_choices=-1,
        is_pair=False,
        framework=None,
        num_channels=3,
        image_width=40,
        image_height=40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [''' '''.join(['''a''']) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs['''inputs'''] = inputs.pop('''input_ids''')
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs['''inputs'''] = inputs.pop('''pixel_values''')
            return inputs
        else:
            raise ValueError(
                '''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''')
| 5 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import scaffolding: maps each submodule to the public names it provides.
# NOTE(review): the scrambler bound every value to `UpperCAmelCase__` (each
# assignment clobbers the previous one) while `_import_structure` at the
# bottom is never defined — restore the original bindings before running.
UpperCAmelCase__ = {
    '''configuration_efficientnet''': [
        '''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''EfficientNetConfig''',
        '''EfficientNetOnnxConfig''',
    ]
}
# Image processor requires vision extras.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase__ = ['''EfficientNetImageProcessor''']
# PyTorch model classes are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase__ = [
        '''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''EfficientNetForImageClassification''',
        '''EfficientNetModel''',
        '''EfficientNetPreTrainedModel''',
    ]
# Static type checkers see the real imports; at runtime a _LazyModule defers them.
if TYPE_CHECKING:
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )
else:
    import sys
    UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 5 | 1 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def UpperCAmelCase_(model, ckpt_dir, model_name):
    """Export a PyTorch BERT's weights as a TF1 checkpoint under ``ckpt_dir``.

    Fix (review): the scrambled original repeated parameter names (a
    SyntaxError, here and in the nested helpers) and collapsed its locals
    onto ``_lowercase``; names are recovered from the surviving references.

    :param model:     BertModel Pytorch model instance to be converted
    :param ckpt_dir:  Tensorflow model directory
    :param model_name: model name
    """
    # Weights whose PyTorch layout must be transposed for TF dense kernels.
    tensors_to_transpose = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
    # Ordered name rewrites from PyTorch state-dict keys to TF variable names.
    var_map = (
        ('''layer.''', '''layer_'''),
        ('''word_embeddings.weight''', '''word_embeddings'''),
        ('''position_embeddings.weight''', '''position_embeddings'''),
        ('''token_type_embeddings.weight''', '''token_type_embeddings'''),
        ('''.''', '''/'''),
        ('''LayerNorm/weight''', '''LayerNorm/gamma'''),
        ('''LayerNorm/bias''', '''LayerNorm/beta'''),
        ('''weight''', '''kernel'''),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()

    def to_tf_var_name(name):
        # Apply every rewrite in order, then namespace under "bert/".
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return F"bert/{name}"

    def create_tf_var(tensor, name, session):
        # Create and zero-initialize a TF variable matching the tensor's shape/dtype.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(F"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace('''-''', '''_''') + '''.ckpt'''))
def UpperCAmelCase_(raw_args=None):
    """CLI entry point: load a PyTorch BERT checkpoint and export it as TF1.

    Fix (review): the original passed an undefined ``__snake_case`` as the
    ``type`` / ``required`` values of every argument; they are reconstructed
    as ``str`` / ``True``-``False`` from the argument semantics — confirm
    against the unscrambled source. ``convert_pytorch_checkpoint_to_tf`` is
    referenced as in the original, although this file defines it under a
    scrambled name.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_name''', type=str, required=True, help='''model name e.g. bert-base-uncased''')
    parser.add_argument(
        '''--cache_dir''', type=str, default=None, required=False, help='''Directory containing pytorch model''')
    parser.add_argument('''--pytorch_model_path''', type=str, required=True, help='''/path/to/<pytorch-model-name>.bin''')
    parser.add_argument('''--tf_cache_dir''', type=str, required=True, help='''Directory in which to save tensorflow model''')
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this scrambled file — the entry
    # point above was renamed to UpperCAmelCase_.
    main()
| 5 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding: maps each submodule to the public names it provides.
# NOTE(review): the scrambler bound every value to `UpperCAmelCase__` (each
# assignment clobbers the previous one) while `_import_structure` at the
# bottom is never defined — restore the original bindings before running.
UpperCAmelCase__ = {
    '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
# PyTorch model classes are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase__ = [
        '''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TimesformerModel''',
        '''TimesformerForVideoClassification''',
        '''TimesformerPreTrainedModel''',
    ]
# Static type checkers see the real imports; at runtime a _LazyModule defers them.
if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )
else:
    import sys
    UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 | 1 |
import logging

from transformers import PretrainedConfig

# Module-level logger (name mangled; conventionally ``logger``).
UpperCAmelCase__ = logging.getLogger(__name__)

# Map from checkpoint shortcut name to the URL of its hosted config.json.
UpperCAmelCase__ = {
    '''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class lowerCamelCase__ ( lowerCAmelCase):
    """Configuration for the BertAbs extractive/abstractive summarization model.

    The previous signature repeated the single placeholder name
    ``UpperCAmelCase`` for every argument — duplicate parameter names are a
    SyntaxError — and the body rebound a throwaway ``_lowercase`` local
    instead of storing anything on ``self``.  Parameter names below were
    reconstructed from the (encoder/decoder) attribute assignment order.
    """

    # model identifier string used by the config machinery
    SCREAMING_SNAKE_CASE__ = '''bertabs'''

    def __init__(
        self,
        vocab_size=3_0_5_2_2,
        max_pos=5_1_2,
        enc_layers=6,
        enc_hidden_size=5_1_2,
        enc_heads=8,
        enc_ff_size=5_1_2,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=7_6_8,
        dec_heads=8,
        dec_ff_size=2_0_4_8,
        dec_dropout=0.2,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        # encoder hyper-parameters
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        # decoder hyper-parameters
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 5 |
def actual_power(a, b):
    """Divide-and-conquer exponentiation (integer exponent).

    ``int(b / 2)`` truncates toward zero, so the recursion also terminates for
    negative ``b`` (it then effectively computes ``a ** abs(b)``).

    Both defs here were previously named ``UpperCAmelCase_`` with duplicated
    parameter names (a SyntaxError), while the bodies called ``actual_power``
    and ``power`` — the names are restored to what the code invokes.
    """
    if b == 0:
        return 1
    # Compute the half power once instead of recursing twice per level
    # (O(log b) multiplications instead of O(b)); results are identical.
    half = actual_power(a, int(b / 2))
    if (b % 2) == 0:
        return half * half
    return a * half * half


def power(a, b) -> float:
    """Return ``a ** b``; negative exponents yield the float reciprocal."""
    if b < 0:
        # actual_power computes a**abs(b) for negative b (see docstring above).
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
| 5 | 1 |
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase__ ( lowerCAmelCase):
    # Test helper that builds small DeBERTa configs/inputs and runs shape checks.
    #
    # NOTE(review): every multi-argument method in this class repeats the single
    # parameter name ``UpperCAmelCase`` — Python rejects duplicate argument
    # names, so this class cannot compile as written.  The intended names can be
    # read off the assignment order below (parent, batch_size, seq_length, ...)
    # — TODO restore them.  Likewise the ``_lowercase =`` lines presumably were
    # ``self.<name> = <name>`` originally, since other methods read these
    # attributes via ``self``.
    def __init__(self , UpperCAmelCase , UpperCAmelCase=1_3 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=9_9 , UpperCAmelCase=3_2 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=3_7 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=5_1_2 , UpperCAmelCase=1_6 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase="None" , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , ) -> Any:
        # NOTE(review): each line rebinds the same throwaway local; nothing is
        # stored on ``self`` and the right-hand-side names are undefined here.
        _lowercase =parent
        _lowercase =batch_size
        _lowercase =seq_length
        _lowercase =is_training
        _lowercase =use_input_mask
        _lowercase =use_token_type_ids
        _lowercase =use_labels
        _lowercase =vocab_size
        _lowercase =hidden_size
        _lowercase =num_hidden_layers
        _lowercase =num_attention_heads
        _lowercase =intermediate_size
        _lowercase =hidden_act
        _lowercase =hidden_dropout_prob
        _lowercase =attention_probs_dropout_prob
        _lowercase =max_position_embeddings
        _lowercase =type_vocab_size
        _lowercase =type_sequence_label_size
        _lowercase =initializer_range
        _lowercase =num_labels
        _lowercase =num_choices
        _lowercase =relative_attention
        _lowercase =position_biased_input
        _lowercase =pos_att_type
        _lowercase =scope
    # NOTE(review): all methods below are named ``__A`` (later definitions
    # shadow earlier ones); callers elsewhere use ``prepare_config_and_inputs``,
    # ``create_and_check_deberta_model`` etc. — presumably the original names.
    # Presumably ``prepare_config_and_inputs`` — builds ids/masks/labels tensors.
    def __A (self ) -> Any:
        _lowercase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _lowercase =None
        if self.use_input_mask:
            _lowercase =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        _lowercase =None
        if self.use_token_type_ids:
            _lowercase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _lowercase =None
        _lowercase =None
        _lowercase =None
        if self.use_labels:
            _lowercase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _lowercase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _lowercase =ids_tensor([self.batch_size] , self.num_choices )
        # NOTE(review): the names returned below were never bound (the
        # assignments above all target ``_lowercase``).
        _lowercase =self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    # Presumably ``get_config`` — builds a small DebertaConfig for the tests.
    def __A (self ) -> Optional[int]:
        return DebertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    # Presumably ``get_pipeline_config`` — same config with vocab_size=300.
    def __A (self ) -> List[Any]:
        _lowercase =self.get_config()
        _lowercase =3_0_0
        return config
    # Presumably ``check_loss_output`` — asserts the loss is a scalar.
    def __A (self , UpperCAmelCase ) -> Union[str, Any]:
        self.parent.assertListEqual(list(result.loss.size() ) , [] )
    # Presumably ``create_and_check_deberta_model``.
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
        _lowercase =DebertaModel(config=UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        _lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase )[0]
        _lowercase =model(UpperCAmelCase , token_type_ids=UpperCAmelCase )[0]
        _lowercase =model(UpperCAmelCase )[0]
        self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
    # Presumably ``create_and_check_deberta_for_masked_lm``.
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
        _lowercase =DebertaForMaskedLM(config=UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        _lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    # Presumably ``create_and_check_deberta_for_sequence_classification``.
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
        _lowercase =self.num_labels
        _lowercase =DebertaForSequenceClassification(UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        _lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
        self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
        self.check_loss_output(UpperCAmelCase )
    # Presumably ``create_and_check_deberta_for_token_classification``.
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
        _lowercase =self.num_labels
        _lowercase =DebertaForTokenClassification(config=UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        _lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    # Presumably ``create_and_check_deberta_for_question_answering``.
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any:
        _lowercase =DebertaForQuestionAnswering(config=UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        _lowercase =model(
            UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    # Presumably ``prepare_config_and_inputs_for_common``.
    def __A (self ) -> Any:
        _lowercase =self.prepare_config_and_inputs()
        # NOTE(review): this unpacking binds ``_lowercase`` seven times and
        # reads ``config_and_inputs``/``input_ids``/... that were never bound.
        (
            (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) ,
        ) =config_and_inputs
        _lowercase ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class lowerCamelCase__ ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase):
    # unittest entry points for the DeBERTa model suite; the heavy lifting is
    # delegated to the model-tester class defined above.
    #
    # NOTE(review): the seven class attributes below all share the one name
    # ``SCREAMING_SNAKE_CASE__``, so each assignment overwrites the previous
    # one (only the final ``False`` survives) — presumably these were distinct
    # flags/mappings such as ``all_model_classes`` and
    # ``pipeline_model_mapping``.  TODO restore the real names.
    SCREAMING_SNAKE_CASE__ = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    SCREAMING_SNAKE_CASE__ = (
        {
            '''feature-extraction''': DebertaModel,
            '''fill-mask''': DebertaForMaskedLM,
            '''question-answering''': DebertaForQuestionAnswering,
            '''text-classification''': DebertaForSequenceClassification,
            '''token-classification''': DebertaForTokenClassification,
            '''zero-shot''': DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE__ = True
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = False
    # NOTE(review): all test methods are named ``__A`` (later defs shadow the
    # earlier ones, so only the last plain method and the @slow one remain),
    # and the first one binds the testers to a throwaway local even though
    # ``self.model_tester``/``self.config_tester`` are read later — presumably
    # a renamed ``setUp`` plus ``test_*`` methods.  TODO restore.
    def __A (self ) -> List[str]:
        _lowercase =DebertaModelTester(self )
        _lowercase =ConfigTester(self , config_class=UpperCAmelCase , hidden_size=3_7 )
    def __A (self ) -> Dict:
        self.config_tester.run_common_tests()
    def __A (self ) -> Optional[int]:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*UpperCAmelCase )
    def __A (self ) -> Tuple:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*UpperCAmelCase )
    def __A (self ) -> List[str]:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*UpperCAmelCase )
    def __A (self ) -> int:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*UpperCAmelCase )
    def __A (self ) -> str:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*UpperCAmelCase )
    @slow
    def __A (self ) -> Optional[int]:
        # Smoke-test loading the first published checkpoint.
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowercase =DebertaModel.from_pretrained(UpperCAmelCase )
            self.assertIsNotNone(UpperCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase):
    # Integration checks against the pretrained ``microsoft/deberta-base`` weights.
    #
    # NOTE(review): both methods share the name ``__A`` (the second shadows the
    # first) and unittest will not collect either under that name — presumably
    # distinct ``test_*`` methods originally.  TODO restore.
    @unittest.skip(reason='''Model not available yet''' )
    def __A (self ) -> Optional[Any]:
        pass
    @slow
    def __A (self ) -> Any:
        # NOTE(review): every ``_lowercase =`` below rebinds one throwaway
        # local, so the model, inputs and forward output are discarded and
        # ``model``/``UpperCAmelCase``/``output`` read afterwards are undefined
        # — presumably the targets were ``model``, ``input_ids``,
        # ``attention_mask``, ``output`` and ``expected_slice``.
        _lowercase =DebertaModel.from_pretrained('''microsoft/deberta-base''' )
        _lowercase =torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
        _lowercase =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            _lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase )[0]
        # compare the actual values for a slice.
        _lowercase =torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase , atol=1e-4 ) , f"{output[:, 1:4, 1:4]}" )
| 5 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowerCamelCase__ ( nn.Module):
    """Two parallel Transformer2D blocks whose outputs are mixed.

    Each of the two inner transformers encodes one slice of
    ``encoder_hidden_states`` (split per ``condition_lengths``); their residual
    outputs are blended with ``mix_ratio`` and added back to the input.

    The previous ``__init__`` repeated the single parameter name
    ``UpperCAmelCase`` (duplicate argument names are a SyntaxError) and bound
    every attribute to a throwaway ``_lowercase`` local even though the second
    method reads ``self.transformers`` / ``self.mix_ratio`` /
    ``self.condition_lengths`` / ``self.transformer_index_for_condition``.
    Parameter names were reconstructed from the keyword arguments forwarded to
    ``TransformeraDModel``.
    """

    def __init__(
        self,
        num_attention_heads=1_6,
        attention_head_dim=8_8,
        in_channels=None,
        num_layers=1,
        dropout=0.0,
        norm_num_groups=3_2,
        cross_attention_dim=None,
        attention_bias=False,
        sample_size=None,
        num_vector_embeds=None,
        activation_fn="geglu",
        num_embeds_ada_norm=None,
    ) -> None:
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [7_7, 2_5_7]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    # NOTE(review): ``nn.Module.__call__`` dispatches to ``forward``; this
    # method keeps its original (obfuscated) name ``__A`` so no visible caller
    # breaks, but it is presumably meant to be ``forward`` — confirm upstream.
    def __A (self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict=True):
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,  # the [0]-indexing below expects the tuple form
            )[0]
            # keep only the residual each transformer adds on top of the input
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return TransformeraDModelOutput(sample=output_states)
| 5 | 1 |
from typing import Any
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> list:
"""simple docstring"""
_validation(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
# Creates data structures and fill initial step
_lowercase ={}
_lowercase ={}
for state in states_space:
_lowercase =observations_space[0]
_lowercase =(
initial_probabilities[state] * emission_probabilities[state][observation]
)
_lowercase =None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(__snake_case ) ):
_lowercase =observations_space[o]
_lowercase =observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
_lowercase =''''''
_lowercase =-1
for k_state in states_space:
_lowercase =(
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
_lowercase =probability
_lowercase =k_state
# Update probabilities and pointers dicts
_lowercase =(
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
_lowercase =arg_max
# The final observation
_lowercase =observations_space[len(__snake_case ) - 1]
# argmax for given final observation
_lowercase =''''''
_lowercase =-1
for k_state in states_space:
_lowercase =probabilities[(k_state, final_observation)]
if probability > max_probability:
_lowercase =probability
_lowercase =k_state
_lowercase =arg_max
# Process pointers backwards
_lowercase =last_state
_lowercase =[]
for o in range(len(__snake_case ) - 1 , -1 , -1 ):
result.append(__snake_case )
_lowercase =pointers[previous, observations_space[o]]
result.reverse()
return result
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> None:
"""simple docstring"""
_validate_not_empty(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
_validate_lists(__snake_case , __snake_case )
_validate_dicts(
__snake_case , __snake_case , __snake_case )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> None:
"""simple docstring"""
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> None:
"""simple docstring"""
_validate_list(__snake_case , '''observations_space''' )
_validate_list(__snake_case , '''states_space''' )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> None:
"""simple docstring"""
if not isinstance(_object , __snake_case ):
_lowercase =F"{var_name} must be a list"
raise ValueError(__snake_case )
else:
for x in _object:
if not isinstance(__snake_case , __snake_case ):
_lowercase =F"{var_name} must be a list of strings"
raise ValueError(__snake_case )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , ) -> None:
"""simple docstring"""
_validate_dict(__snake_case , '''initial_probabilities''' , __snake_case )
_validate_nested_dict(__snake_case , '''transition_probabilities''' )
_validate_nested_dict(__snake_case , '''emission_probabilities''' )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> None:
"""simple docstring"""
_validate_dict(_object , __snake_case , __snake_case )
for x in _object.values():
_validate_dict(__snake_case , __snake_case , __snake_case , __snake_case )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case = False ) -> None:
"""simple docstring"""
if not isinstance(_object , __snake_case ):
_lowercase =F"{var_name} must be a dict"
raise ValueError(__snake_case )
if not all(isinstance(__snake_case , __snake_case ) for x in _object ):
_lowercase =F"{var_name} all keys must be strings"
raise ValueError(__snake_case )
if not all(isinstance(__snake_case , __snake_case ) for x in _object.values() ):
_lowercase ='''nested dictionary ''' if nested else ''''''
_lowercase =F"{var_name} {nested_text}all values must be {value_type.__name__}"
raise ValueError(__snake_case )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 5 |
import heapq as hq
import math
from collections.abc import Iterator
class lowerCamelCase__ :
    """A graph vertex for Prim's minimum-spanning-tree algorithms.

    ``add_neighbor``/``add_edge`` were previously both named ``__A`` even
    though ``connect`` below calls them by their real names, ``__lt__`` read
    an undefined ``other``, and ``add_edge`` discarded the weight instead of
    storing it in ``self.edges``.
    """

    def __init__(self, id_) -> None:
        self.id = str(id_)
        self.key = None  # current best edge weight into the growing MST
        self.pi = None  # predecessor vertex in the MST
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other) -> bool:
        # ordering by key lets min()/heapq pick the cheapest frontier vertex
        return self.key < other.key

    def __repr__(self) -> str:
        return self.id

    def add_neighbor(self, vertex) -> None:
        """Record *vertex* as adjacent to this one."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight) -> None:
        """Store the weight of the edge to *vertex*, keyed by its id."""
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge) -> None:
    """Connect vertices ``a`` and ``b`` (1-indexed) with an edge of weight *edge*."""
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph, root) -> list:
    """Prim's MST algorithm (list-based scan, O(V^2)).

    Returns ``[(child, parent), ...]`` pairs with ids shifted by +1.
    """
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            # relax the frontier edge if it improves v's best known weight
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph, root) -> Iterator[tuple]:
    """Prim's MST algorithm using a binary heap, yielding MST edges lazily."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
        # keys were mutated above, so restore the heap invariant
        hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """Placeholder for the doctest exercising prim/prim_heap (original body lost)."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 5 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy-import structure consumed by ``_LazyModule`` below.  Previously the
# dict was bound to ``UpperCAmelCase__`` and that name was then rebound to
# plain lists, so ``_import_structure`` on the last line was never defined
# (NameError at import time).
_import_structure = {
    '''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
    '''tokenization_xlm''': ['''XLMTokenizer'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: expose the PyTorch modeling symbols lazily.
    _import_structure['''modeling_xlm'''] = [
        '''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XLMForMultipleChoice''',
        '''XLMForQuestionAnswering''',
        '''XLMForQuestionAnsweringSimple''',
        '''XLMForSequenceClassification''',
        '''XLMForTokenClassification''',
        '''XLMModel''',
        '''XLMPreTrainedModel''',
        '''XLMWithLMHeadModel''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow is available: expose the TF modeling symbols lazily.
    _import_structure['''modeling_tf_xlm'''] = [
        '''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFXLMForMultipleChoice''',
        '''TFXLMForQuestionAnsweringSimple''',
        '''TFXLMForSequenceClassification''',
        '''TFXLMForTokenClassification''',
        '''TFXLMMainLayer''',
        '''TFXLMModel''',
        '''TFXLMPreTrainedModel''',
        '''TFXLMWithLMHeadModel''',
    ]

if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    # Replace the module with a lazy proxy; attribute access triggers the real
    # imports declared in ``_import_structure``.
    UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 |
# flake8: noqa
# Lint as: python3

# NOTE(review): this list appears to play the role of ``__all__`` (the public
# API of the package); the name was mangled to ``UpperCAmelCase__``, so Python
# will not treat it as ``__all__`` — confirm and restore if intended.
UpperCAmelCase__ = [
    '''VerificationMode''',
    '''Version''',
    '''disable_progress_bar''',
    '''enable_progress_bar''',
    '''is_progress_bar_enabled''',
    '''experimental''',
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 5 | 1 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import structure consumed by ``_LazyModule`` below.  Previously the
# dict was bound to ``UpperCAmelCase__`` and that name was then rebound to a
# plain list, so ``_import_structure`` on the last line was never defined
# (NameError at import time).
_import_structure = {
    '''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
    '''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: also expose the modeling symbols lazily.
    _import_structure['''modeling_cpmant'''] = [
        '''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''CpmAntForCausalLM''',
        '''CpmAntModel''',
        '''CpmAntPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    # Replace the module with a lazy proxy; attribute access triggers the real
    # imports declared in ``_import_structure``.
    UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 |
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


# Module logger (name mangled; conventionally ``logger``).
UpperCAmelCase__ = logging.get_logger(__name__)

# Map from checkpoint shortcut name to its hosted config.json.
UpperCAmelCase__ = {
    '''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}


class lowerCamelCase__ ( lowerCAmelCase):
    # Configuration class for WavLM-style speech models.
    #
    # NOTE(review): the ``__init__`` signature repeats the single parameter
    # name ``UpperCAmelCase`` for every argument — duplicate argument names are
    # a SyntaxError, so this class cannot run as written.  The intended names
    # can be read off the attribute assignments in the body (vocab_size,
    # hidden_size, ..., xvector_output_dim) — TODO restore them.  Likewise the
    # ``_lowercase =`` lines were presumably ``self.<attr> = <param>``; as
    # written nothing is stored and ``self.conv_dim``/``self.conv_stride``/
    # ``self.conv_kernel`` read below are undefined.
    SCREAMING_SNAKE_CASE__ = '''wavlm'''
    def __init__(self , UpperCAmelCase=3_2 , UpperCAmelCase=7_6_8 , UpperCAmelCase=1_2 , UpperCAmelCase=1_2 , UpperCAmelCase=3_0_7_2 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase="group" , UpperCAmelCase="gelu" , UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase=(1_0, 3, 3, 3, 3, 2, 2) , UpperCAmelCase=False , UpperCAmelCase=1_2_8 , UpperCAmelCase=1_6 , UpperCAmelCase=3_2_0 , UpperCAmelCase=8_0_0 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.05 , UpperCAmelCase=1_0 , UpperCAmelCase=2 , UpperCAmelCase=0.0 , UpperCAmelCase=1_0 , UpperCAmelCase=3_2_0 , UpperCAmelCase=2 , UpperCAmelCase=0.1 , UpperCAmelCase=1_0_0 , UpperCAmelCase=2_5_6 , UpperCAmelCase=2_5_6 , UpperCAmelCase=0.1 , UpperCAmelCase="mean" , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=2_5_6 , UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCAmelCase=(5, 3, 3, 1, 1) , UpperCAmelCase=(1, 2, 3, 1, 1) , UpperCAmelCase=5_1_2 , UpperCAmelCase=8_0 , UpperCAmelCase=0 , UpperCAmelCase=1 , UpperCAmelCase=2 , UpperCAmelCase=False , UpperCAmelCase=3 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=None , **UpperCAmelCase , ) -> Optional[Any]:
        super().__init__(**UpperCAmelCase , pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase )
        # transformer encoder hyper-parameters
        _lowercase =hidden_size
        _lowercase =feat_extract_norm
        _lowercase =feat_extract_activation
        # conv feature-extractor geometry (dims / strides / kernels)
        _lowercase =list(UpperCAmelCase )
        _lowercase =list(UpperCAmelCase )
        _lowercase =list(UpperCAmelCase )
        _lowercase =conv_bias
        _lowercase =num_buckets
        _lowercase =max_bucket_distance
        _lowercase =num_conv_pos_embeddings
        _lowercase =num_conv_pos_embedding_groups
        _lowercase =len(self.conv_dim )
        _lowercase =num_hidden_layers
        _lowercase =intermediate_size
        _lowercase =hidden_act
        _lowercase =num_attention_heads
        # dropout / regularization
        _lowercase =hidden_dropout
        _lowercase =attention_dropout
        _lowercase =activation_dropout
        _lowercase =feat_proj_dropout
        _lowercase =final_dropout
        _lowercase =layerdrop
        _lowercase =layer_norm_eps
        _lowercase =initializer_range
        _lowercase =num_ctc_classes
        _lowercase =vocab_size
        _lowercase =do_stable_layer_norm
        _lowercase =use_weighted_layer_sum
        _lowercase =classifier_proj_size
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        _lowercase =apply_spec_augment
        _lowercase =mask_time_prob
        _lowercase =mask_time_length
        _lowercase =mask_time_min_masks
        _lowercase =mask_feature_prob
        _lowercase =mask_feature_length
        # parameters for pretraining with codevector quantized representations
        _lowercase =num_codevectors_per_group
        _lowercase =num_codevector_groups
        _lowercase =contrastive_logits_temperature
        _lowercase =num_negatives
        _lowercase =codevector_dim
        _lowercase =proj_codevector_dim
        _lowercase =diversity_loss_weight
        # ctc loss
        _lowercase =ctc_loss_reduction
        _lowercase =ctc_zero_infinity
        # adapter
        _lowercase =add_adapter
        _lowercase =adapter_kernel_size
        _lowercase =adapter_stride
        _lowercase =num_adapter_layers
        _lowercase =output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        _lowercase =classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        _lowercase =list(UpperCAmelCase )
        _lowercase =list(UpperCAmelCase )
        _lowercase =list(UpperCAmelCase )
        _lowercase =xvector_output_dim
    @property
    def __A (self ) -> int:
        # Total stride of the conv feature extractor (product of all strides);
        # presumably the upstream ``inputs_to_logits_ratio`` property — confirm.
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 5 | 1 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
UpperCAmelCase__ = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def test_inspect_dataset(path , tmp_path ):
    """``inspect_dataset`` copies the dataset script into ``tmp_path``.

    Parameter names restored: the signature repeated ``__snake_case`` twice
    (a SyntaxError), the parametrize id is ``path`` and the second argument is
    the pytest ``tmp_path`` fixture; ``script_name`` was never bound.
    """
    inspect_dataset(path , tmp_path )
    script_name = path + '''.py'''
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def test_inspect_metric(path , tmp_path ):
    """``inspect_metric`` copies the metric script into ``tmp_path``.

    Parameter names restored from the parametrize id and the pytest
    ``tmp_path`` fixture (the signature previously repeated ``__snake_case``,
    which is a SyntaxError, and ``script_name`` was never bound).
    """
    inspect_metric(path , tmp_path )
    script_name = path + '''.py'''
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
    '''path, config_name, expected_splits''' , [
        ('''squad''', '''plain_text''', ['''train''', '''validation''']),
        ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
        ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
    ] , )
def test_get_dataset_config_info(path , config_name , expected_splits ):
    """``get_dataset_config_info`` returns the requested config with its splits.

    Parameter names restored from the parametrize id string (the signature
    previously repeated ``__snake_case`` three times, a SyntaxError, and the
    ``info`` local read by the asserts was never bound).
    """
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
    '''path, config_name, expected_exception''' , [
        ('''paws''', None, ValueError),
    ] , )
def test_get_dataset_config_info_error(path , config_name , expected_exception ):
    """A multi-config dataset without an explicit config_name raises.

    Parameter names restored from the parametrize id string (the signature
    previously repeated ``__snake_case``, a SyntaxError).
    """
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )
@pytest.mark.parametrize(
    '''path, expected''' , [
        ('''squad''', '''plain_text'''),
        ('''acronym_identification''', '''default'''),
        ('''lhoestq/squad''', '''plain_text'''),
        ('''lhoestq/test''', '''default'''),
        ('''lhoestq/demo1''', '''lhoestq--demo1'''),
        ('''dalle-mini/wit''', '''dalle-mini--wit'''),
    ] , )
def test_get_dataset_config_names(path , expected ):
    """The expected config name is among those returned for the dataset.

    Parameter names restored from the parametrize id string; the
    ``config_names`` local read by the assert was never bound before.
    """
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
    '''path, expected_configs, expected_splits_in_first_config''' , [
        ('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
        ('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
        ('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
    ] , )
def test_get_dataset_infos(path , expected_configs , expected_splits_in_first_config ):
    """``get_dataset_infos`` returns one DatasetInfo per config, in order.

    Parameter names restored from the parametrize id string; the
    ``infos``/``expected_config``/``info`` locals read by the asserts were
    previously all bound to the throwaway ``_lowercase``.
    """
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
    '''path, expected_config, expected_splits''' , [
        ('''squad''', '''plain_text''', ['''train''', '''validation''']),
        ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
        ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
    ] , )
def UpperCAmelCase_ ( path , expected_config , expected_splits ) -> Tuple:
    """The info for a given config should expose its name and split list."""
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
    '''path, config_name, expected_exception''' , [
        ('''paws''', None, ValueError),
    ] , )
def UpperCAmelCase_ ( path , config_name , expected_exception ) -> Union[str, Any]:
    """`get_dataset_split_names` must raise when the config name is missing/ambiguous."""
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path , config_name=config_name )
| 5 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase):
    """Slow integration test for the Flax Stable Diffusion 2 inpainting pipeline."""
    # NOTE(review): both methods below are named `__A` (an automated-rename
    # artifact), so the second definition shadows the first in the class
    # namespace. The first body looks like it was `tearDown`, the second the
    # actual test. Likewise every `_lowercase =` is a placeholder: the
    # right-hand sides read the intended names (init_image, mask_image,
    # pipeline, prompt, num_samples, ...) which are never bound.
    def __A (self ) -> List[Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def __A (self ) -> Optional[Any]:
        # Download the reference init image and inpainting mask.
        _lowercase =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''' )
        _lowercase =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
        _lowercase ='''xvjiarui/stable-diffusion-2-inpainting'''
        _lowercase , _lowercase =FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCAmelCase , safety_checker=UpperCAmelCase )
        _lowercase ='''Face of a yellow cat, high resolution, sitting on a park bench'''
        _lowercase =jax.random.PRNGKey(0 )
        _lowercase =5_0
        _lowercase =jax.device_count()
        # Replicate prompt/image/mask once per local device for pmapped inference.
        _lowercase =num_samples * [prompt]
        _lowercase =num_samples * [init_image]
        _lowercase =num_samples * [mask_image]
        _lowercase , _lowercase , _lowercase =pipeline.prepare_inputs(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
        # shard inputs and rng
        _lowercase =replicate(UpperCAmelCase )
        _lowercase =jax.random.split(UpperCAmelCase , jax.device_count() )
        _lowercase =shard(UpperCAmelCase )
        _lowercase =shard(UpperCAmelCase )
        _lowercase =shard(UpperCAmelCase )
        _lowercase =pipeline(
            UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase )
        # Compare a 3x3 crop of the first generated image against golden values.
        _lowercase =output.images.reshape(UpperCAmelCase , 5_1_2 , 5_1_2 , 3 )
        _lowercase =images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        _lowercase =jnp.asarray(jax.device_get(image_slice.flatten() ) )
        _lowercase =jnp.array(
            [0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
        print(f"output_slice: {output_slice}" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 5 | 1 |
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)
# The UnCLIP pipelines require both PyTorch and transformers >= 4.25.0.
# When either is missing, fall back to dummy objects that raise a helpful
# error message on first use instead of failing at import time.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 5 |
import comet # From: unbabel-comet
import torch
import datasets
UpperCAmelCase__ = datasets.logging.get_logger(__name__)
# Fix: the three module strings below were all being assigned to the same
# placeholder name, so the metric class (which reads `_CITATION`,
# `_DESCRIPTION` and `_KWARGS_DESCRIPTION`) raised NameError. Bind them to
# the names the class actually uses. String contents are unchanged.
_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
    >>> comet_metric = datasets.load_metric(\'comet\')
    >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
    >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
    >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
    >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
    >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
    >>> print([round(v, 2) for v in results["scores"]])
    [0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCamelCase__ ( datasets.Metric):
    """COMET machine-translation quality metric (wraps the `unbabel-comet` models)."""

    # Fix: the three hooks below were all named `__A`, so the later
    # definitions shadowed the earlier ones and `datasets.Metric` never saw
    # its `_info`/`_download_and_prepare`/`_compute` overrides. Also restore
    # `self.scorer` (the original bound the checkpoint to a throwaway local).
    def _info(self ):
        # Metric metadata plus the expected string features for each input.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''sources''': datasets.Value('''string''' , id='''sequence''' ),
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
                '''https://github.com/Unbabel/COMET''',
                '''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
                '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
            ] , )

    def _download_and_prepare(self , dl_manager ):
        # Load the COMET checkpoint once; `self.config_name` selects the model.
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )

    def _compute(self , sources , predictions , references , gpus=None , progress_bar=False ):
        """Score each (src, mt, ref) triple; returns the mean and per-sample scores."""
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'''src''': sources, '''mt''': predictions, '''ref''': references}
        # Transpose the column dict into one row dict per sample.
        data = [dict(zip(data , t ) ) for t in zip(*data.values() )]
        mean_score , scores = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar )
        return {"mean_score": mean_score, "scores": scores}
| 5 | 1 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
# Fix: both mappings were bound to the same placeholder name (the second
# overwrote the first), while the converter below reads
# `NUM_HIDDEN_LAYERS_MAPPING` and `HIDEN_SIZE_MAPPING`.
# Number of transformer blocks for each released RWKV checkpoint size.
NUM_HIDDEN_LAYERS_MAPPING = {
    '''169M''': 12,
    '''430M''': 24,
    '''1B5''': 24,
    '''3B''': 32,
    '''7B''': 32,
    '''14B''': 40,
}
# Hidden (embedding) dimension for each checkpoint size.
HIDEN_SIZE_MAPPING = {
    '''169M''': 768,
    '''430M''': 1024,
    '''1B5''': 2048,
    '''3B''': 2560,
    '''7B''': 4096,
    '''14B''': 5120,
}
def convert_state_dict ( state_dict ):
    """Rename the keys of an original RWKV `state_dict` to HF `RwkvModel` names.

    Mutates and returns *state_dict*: each key is popped and re-inserted under
    its converted name, so insertion order is preserved. Fixes the placeholder
    assignments of the original (which raised NameError) and uses the name the
    caller below actually invokes (`convert_state_dict`).
    """
    for name in list(state_dict.keys() ):
        weight = state_dict.pop(name )
        # emb -> embedding
        if name.startswith('''emb.''' ):
            name = name.replace('''emb.''' , '''embeddings.''' )
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith('''blocks.0.ln0''' ):
            name = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' )
        # att -> attention
        name = re.sub(r'''blocks\.(\d+)\.att''' , r'''blocks.\1.attention''' , name )
        # ffn -> feed_forward
        name = re.sub(r'''blocks\.(\d+)\.ffn''' , r'''blocks.\1.feed_forward''' , name )
        # time_mix_k -> time_mix_key
        if name.endswith('''.time_mix_k''' ):
            name = name.replace('''.time_mix_k''' , '''.time_mix_key''' )
        # time_mix_v -> time_mix_value
        if name.endswith('''.time_mix_v''' ):
            name = name.replace('''.time_mix_v''' , '''.time_mix_value''' )
        # time_mix_r -> time_mix_receptance
        if name.endswith('''.time_mix_r''' ):
            name = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' )
        # Everything except the LM head lives under the `rwkv.` prefix.
        if name != "head.weight":
            name = '''rwkv.''' + name
        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format ( repo_id , checkpoint_file , output_dir , size=None , tokenizer_file=None , push_to_hub=False , model_name=None ):
    """Convert an original RWKV checkpoint from the Hub into HF `RwkvForCausalLM` format.

    Fixes the original definition, which declared `__snake_case` seven times
    (a SyntaxError) and bound every intermediate to a placeholder; the
    parameter names now match the keyword arguments used by the CLI caller.

    Args:
        repo_id: Hub repo holding the original checkpoint.
        checkpoint_file: name of the checkpoint file inside that repo.
        output_dir: where the converted model/config/tokenizer are saved.
        size: one of the known sizes ("169M", ... "14B"); inferred from the
            checkpoint name when omitted.
        tokenizer_file: optional tokenizer.json; defaults to GPT-NeoX-20B.
        push_to_hub: also upload the converted model under `model_name`.
        model_name: target Hub repo when pushing.
    """
    # 1. Build (or download) the tokenizer.
    if tokenizer_file is None:
        print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' )
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' )
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file )
        vocab_size = len(tokenizer )
    tokenizer.save_pretrained(output_dir )
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' )
    if size not in possible_sizes:
        raise ValueError(F"`size` should be one of {possible_sizes}, got {size}." )
    config = RwkvConfig(
        vocab_size=vocab_size , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
    config.save_pretrained(output_dir )
    # 3. Download model file then convert state_dict
    checkpoint_path = hf_hub_download(repo_id , checkpoint_file )
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )
    state_dict = convert_state_dict(state_dict )
    # 4. Split in shards and save
    shards , index = shard_checkpoint(state_dict )
    for shard_file, shard in shards.items():
        torch.save(shard , os.path.join(output_dir , shard_file ) )
    if index is not None:
        save_index_file = os.path.join(output_dir , WEIGHTS_INDEX_NAME )
        # Save the index as well
        with open(save_index_file , '''w''' , encoding='''utf-8''' ) as f:
            content = json.dumps(index , indent=2 , sort_keys=True ) + '''\n'''
            f.write(content )
    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        '''Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.''' )
    shard_files = list(shards.keys() )
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir , shard_file ) )
        # Re-save with contiguous CPU tensors to shrink the files on disk.
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(output_dir , shard_file ) )
        del state_dict
        gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' )
        model = AutoModelForCausalLM.from_pretrained(output_dir )
        model.push_to_hub(model_name , max_shard_size='''2GB''' )
        tokenizer.push_to_hub(model_name )
if __name__ == "__main__":
    # CLI entry point for the RWKV -> HF conversion.
    # NOTE(review): the parser is bound to `UpperCAmelCase__` but the code
    # below calls `parser.add_argument(...)` and later `args.*` — placeholder
    # assignments left by an automated rename; the bound names never match.
    UpperCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
    )
    parser.add_argument(
        '''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
    )
    parser.add_argument(
        '''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
    )
    parser.add_argument(
        '''--tokenizer_file''',
        default=None,
        type=str,
        help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
    )
    parser.add_argument(
        '''--size''',
        default=None,
        type=str,
        help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
    )
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
        help='''Push to the Hub the converted model.''',
    )
    parser.add_argument(
        '''--model_name''',
        default=None,
        type=str,
        help='''Name of the pushed model on the Hub, including the username / organization.''',
    )
    UpperCAmelCase__ = parser.parse_args()
    # Forward every CLI flag to the converter.
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
| 5 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowerCamelCase__ ( lowerCAmelCase , lowerCAmelCase):
    """Variance-preserving (VP) score-based SDE scheduler.

    Fixes the original block: `__init__` declared the same parameter name
    several times (a SyntaxError) and stored nothing on `self`, and both the
    schedule setter and the predictor step were named `__A` (so the second
    shadowed the first). `register_to_config` derives the `self.config` keys
    from the parameter names, which the step math reads
    (`beta_min`, `beta_max`, `sampling_eps`, `num_train_timesteps`).
    """
    # Corresponds to the scheduler `order` used by diffusers pipelines.
    SCREAMING_SNAKE_CASE__ = 1

    @register_to_config
    def __init__(self , num_train_timesteps=2_0_0_0 , beta_min=0.1 , beta_max=2_0 , sampling_eps=1e-3 ):
        self.sigmas = None
        self.discrete_sigmas = None
        # Must be populated via `set_timesteps` before stepping.
        self.timesteps = None

    def set_timesteps(self , num_inference_steps , device = None ):
        """Create the descending continuous time grid from 1 down to `sampling_eps`."""
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )

    def step_pred(self , score , x , t , generator=None ):
        """One reverse-SDE predictor step; returns `(x, x_mean)`."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute the reverse drift/diffusion terms
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean

    def __len__(self ):
        return self.config.num_train_timesteps
| 5 | 1 |
def UpperCAmelCase_ ( __snake_case ) -> str:
    """Return the longest palindromic substring of *__snake_case*.

    Manacher's algorithm, O(n): the string is interleaved with ``|``
    separators so odd- and even-length palindromes are handled uniformly.
    Fixes the original block, whose intermediates were bound to placeholders
    (raising NameError) and which crashed with IndexError on empty input.

    >>> UpperCAmelCase_("banana")
    'anana'
    >>> UpperCAmelCase_("")
    ''
    """
    input_string = __snake_case
    if not input_string:
        # Guard: the slicing below assumes at least one character.
        return ""
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""
    for ch in input_string[: len(input_string ) - 1]:
        new_input_string += ch + "|"
    new_input_string += input_string[-1]
    # [l, r] tracks the previously found palindrome with the furthest-right end
    l, r = 0, 0  # noqa: E741
    # length[j] is the palindrome length centered at j in the padded string
    length = [1 for _ in range(len(new_input_string ) )]
    start = 0
    max_length = 0
    for j in range(len(new_input_string ) ):
        # Reuse the mirrored center's answer when j lies inside [l, r].
        k = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
        while (
            j - k >= 0
            and j + k < len(new_input_string )
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # Does this palindrome end after the previously explored end r?
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # Track the first center achieving the maximum length.
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # Extract the winning padded span and drop the "|" separators.
    padded = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for ch in padded:
        if ch != "|":
            output_string += ch
    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 5 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def UpperCAmelCase_ ( __snake_case ) -> Optional[Any]:
    """Build a `MobileViTConfig` for the named architecture variant.

    NOTE(review): the body is full of automated-rename artifacts — every
    assignment targets the placeholder `_lowercase` (the intended names are
    presumably `config.*` attributes such as the hidden/neck channel lists,
    output stride, id2label, etc. — confirm against the transformers
    MobileViT conversion script), and it reads `mobilevit_name` although the
    parameter is named `__snake_case`. As written it raises NameError.
    """
    _lowercase =MobileViTConfig()
    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        _lowercase =[144, 192, 240]
        _lowercase =[16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        _lowercase =[96, 120, 144]
        _lowercase =[16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        _lowercase =[64, 80, 96]
        _lowercase =[16, 16, 24, 48, 64, 80, 320]
        _lowercase =0.05
        _lowercase =2.0
    # DeepLabV3 variants do semantic segmentation on Pascal VOC (21 classes).
    if mobilevit_name.startswith('''deeplabv3_''' ):
        _lowercase =512
        _lowercase =16
        _lowercase =21
        _lowercase ='''pascal-voc-id2label.json'''
    else:
        _lowercase =1000
        _lowercase ='''imagenet-1k-id2label.json'''
    # Fetch the label mapping from the Hub and attach it to the config.
    _lowercase ='''huggingface/label-files'''
    _lowercase =json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type='''dataset''' ) , '''r''' ) )
    _lowercase ={int(__snake_case ): v for k, v in idalabel.items()}
    _lowercase =idalabel
    _lowercase ={v: k for k, v in idalabel.items()}
    return config
def UpperCAmelCase_ ( __snake_case , __snake_case=False ) -> Tuple:
"""simple docstring"""
for i in range(1 , 6 ):
if F"layer_{i}." in name:
_lowercase =name.replace(F"layer_{i}." , F"encoder.layer.{i - 1}." )
if "conv_1." in name:
_lowercase =name.replace('''conv_1.''' , '''conv_stem.''' )
if ".block." in name:
_lowercase =name.replace('''.block.''' , '''.''' )
if "exp_1x1" in name:
_lowercase =name.replace('''exp_1x1''' , '''expand_1x1''' )
if "red_1x1" in name:
_lowercase =name.replace('''red_1x1''' , '''reduce_1x1''' )
if ".local_rep.conv_3x3." in name:
_lowercase =name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' )
if ".local_rep.conv_1x1." in name:
_lowercase =name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' )
if ".norm." in name:
_lowercase =name.replace('''.norm.''' , '''.normalization.''' )
if ".conv." in name:
_lowercase =name.replace('''.conv.''' , '''.convolution.''' )
if ".conv_proj." in name:
_lowercase =name.replace('''.conv_proj.''' , '''.conv_projection.''' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F".{i}.{j}." in name:
_lowercase =name.replace(F".{i}.{j}." , F".{i}.layer.{j}." )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F".{i}.{j}." in name:
_lowercase =name.replace(F".{i}.{j}." , F".{i}." )
if "expand_1x1" in name:
_lowercase =name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' )
if "conv_3x3" in name:
_lowercase =name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' )
if "reduce_1x1" in name:
_lowercase =name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' )
for i in range(2 , 5 ):
if F".global_rep.{i}.weight" in name:
_lowercase =name.replace(F".global_rep.{i}.weight" , '''.layernorm.weight''' )
if F".global_rep.{i}.bias" in name:
_lowercase =name.replace(F".global_rep.{i}.bias" , '''.layernorm.bias''' )
if ".global_rep." in name:
_lowercase =name.replace('''.global_rep.''' , '''.transformer.''' )
if ".pre_norm_mha.0." in name:
_lowercase =name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' )
if ".pre_norm_mha.1.out_proj." in name:
_lowercase =name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' )
if ".pre_norm_ffn.0." in name:
_lowercase =name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' )
if ".pre_norm_ffn.1." in name:
_lowercase =name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' )
if ".pre_norm_ffn.4." in name:
_lowercase =name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' )
if ".transformer." in name:
_lowercase =name.replace('''.transformer.''' , '''.transformer.layer.''' )
if ".aspp_layer." in name:
_lowercase =name.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in name:
_lowercase =name.replace('''.aspp_pool.''' , '''.''' )
if "seg_head." in name:
_lowercase =name.replace('''seg_head.''' , '''segmentation_head.''' )
if "segmentation_head.classifier.classifier." in name:
_lowercase =name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' )
if "classifier.fc." in name:
_lowercase =name.replace('''classifier.fc.''' , '''classifier.''' )
elif (not base_model) and ("segmentation_head." not in name):
_lowercase ='''mobilevit.''' + name
return name
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case=False ) -> Optional[Any]:
    """Rewrite an original MobileViT state dict into HF names, splitting fused QKV.

    NOTE(review): the signature declares `__snake_case` three times (a
    SyntaxError in Python) and the body assigns to the placeholder
    `_lowercase` while reading the intended names (`base_model`,
    `orig_state_dict`, `model`, `key`, `val`, ...) — artifacts of an
    automated rename; as written this cannot run.
    """
    if base_model:
        _lowercase =''''''
    else:
        _lowercase ='''mobilevit.'''
    for key in orig_state_dict.copy().keys():
        _lowercase =orig_state_dict.pop(__snake_case )
        # Strip the original encoder prefix before renaming.
        if key[:8] == "encoder.":
            _lowercase =key[8:]
        if "qkv" in key:
            # Fused attention projection: slice into query / key / value thirds.
            _lowercase =key.split('''.''' )
            _lowercase =int(key_split[0][6:] ) - 1
            _lowercase =int(key_split[3] )
            _lowercase =model.get_submodule(F"{model_prefix}encoder.layer.{layer_num}" )
            _lowercase =layer.transformer.layer[transformer_num].attention.attention.all_head_size
            _lowercase =(
                F"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                _lowercase =val[:dim, :]
                _lowercase =val[dim : dim * 2, :]
                _lowercase =val[-dim:, :]
            else:
                _lowercase =val[:dim]
                _lowercase =val[dim : dim * 2]
                _lowercase =val[-dim:]
        else:
            _lowercase =val
    return orig_state_dict
def UpperCAmelCase_ ( ) -> Union[str, Any]:
    """Download the standard COCO cats image used to sanity-check conversions.

    Fixes the original body, which bound the URL and image to placeholders
    and returned the never-defined `im`.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case=False ) -> int:
    """Convert an original MobileViT checkpoint, verify its outputs, and save it.

    NOTE(review): the signature declares `__snake_case` four times (a
    SyntaxError) and every intermediate is bound to the placeholder
    `_lowercase` while the code reads the intended names (`mobilevit_name`,
    `config`, `model`, `logits`, ...) — automated-rename artifacts; as
    written this cannot run. Expected value tensors below are golden
    outputs for each released variant.
    """
    _lowercase =get_mobilevit_config(__snake_case )
    # load original state_dict
    _lowercase =torch.load(__snake_case , map_location='''cpu''' )
    # load 🤗 model
    if mobilevit_name.startswith('''deeplabv3_''' ):
        _lowercase =MobileViTForSemanticSegmentation(__snake_case ).eval()
    else:
        _lowercase =MobileViTForImageClassification(__snake_case ).eval()
    _lowercase =convert_state_dict(__snake_case , __snake_case )
    model.load_state_dict(__snake_case )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    _lowercase =MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    _lowercase =image_processor(images=prepare_img() , return_tensors='''pt''' )
    _lowercase =model(**__snake_case )
    _lowercase =outputs.logits
    if mobilevit_name.startswith('''deeplabv3_''' ):
        # Segmentation heads emit a 21-class Pascal VOC map at 32x32.
        assert logits.shape == (1, 21, 32, 32)
        if mobilevit_name == "deeplabv3_mobilevit_s":
            _lowercase =torch.tensor(
                [
                    [[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
                    [[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
                    [[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            _lowercase =torch.tensor(
                [
                    [[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
                    [[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
                    [[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            _lowercase =torch.tensor(
                [
                    [[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
                    [[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
                    [[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
                ] )
        else:
            raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
        assert torch.allclose(logits[0, :3, :3, :3] , __snake_case , atol=1e-4 )
    else:
        # Classification heads emit ImageNet-1k logits.
        assert logits.shape == (1, 1000)
        if mobilevit_name == "mobilevit_s":
            _lowercase =torch.tensor([-0.98_66, 0.23_92, -1.12_41] )
        elif mobilevit_name == "mobilevit_xs":
            _lowercase =torch.tensor([-2.47_61, -0.93_99, -1.95_87] )
        elif mobilevit_name == "mobilevit_xxs":
            _lowercase =torch.tensor([-1.93_64, -1.23_27, -0.46_53] )
        else:
            raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
        assert torch.allclose(logits[0, :3] , __snake_case , atol=1e-4 )
    Path(__snake_case ).mkdir(exist_ok=__snake_case )
    print(F"Saving model {mobilevit_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(__snake_case )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(__snake_case )
    if push_to_hub:
        # Map the short variant name to the published checkpoint name.
        _lowercase ={
            '''mobilevit_s''': '''mobilevit-small''',
            '''mobilevit_xs''': '''mobilevit-x-small''',
            '''mobilevit_xxs''': '''mobilevit-xx-small''',
            '''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
            '''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
            '''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
        }
        print('''Pushing to the hub...''' )
        _lowercase =model_mapping[mobilevit_name]
        image_processor.push_to_hub(__snake_case , organization='''apple''' )
        model.push_to_hub(__snake_case , organization='''apple''' )
if __name__ == "__main__":
    # CLI entry point for the MobileViT conversion script.
    # NOTE(review): the parser and parsed args are bound to the placeholder
    # `UpperCAmelCase__`, yet the code uses `parser.add_argument` and
    # `args.*` — automated-rename artifacts.
    UpperCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--mobilevit_name''',
        default='''mobilevit_s''',
        type=str,
        help=(
            '''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
            ''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
        ),
    )
    parser.add_argument(
        '''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )
    UpperCAmelCase__ = parser.parse_args()
    convert_movilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 5 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class lowerCamelCase__ ( lowerCAmelCase):
    """Minimal iterable dataset that simply yields a preexisting sequence.

    Fixes the original `__init__`, which bound the data to a throwaway local
    so `__iter__`'s read of `self.data` raised AttributeError.
    """

    def __init__(self , data ):
        self.data = data

    def __iter__(self ):
        for element in self.data:
            yield element
def create_accelerator ( __snake_case=True ):
    """Build a 2-process `Accelerator` with the given `even_batches` setting.

    Fixes the original: the instance was bound to a placeholder (so the
    assert raised NameError) and the def name did not match the
    `create_accelerator` calls made by the sibling tests below.
    """
    accelerator = Accelerator(even_batches=__snake_case )
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def create_dataloader ( accelerator , dataset_size , batch_size , iterable = False ):
    """Create (and `accelerator.prepare`) a DataLoader over `range(dataset_size)`.

    Fixes the original: `__snake_case` was declared four times (a
    SyntaxError); the names now match the keyword arguments used by callers
    (`dataset_size=`, `batch_size=`, `iterable=`) and the def name matches
    the `create_dataloader` call sites below.
    """
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size ) ) )
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size ) ) )
    dl = DataLoader(dataset , batch_size=batch_size )
    dl = accelerator.prepare(dl )
    return dl
def verify_dataloader_batch_sizes ( accelerator , dataset_size , batch_size , process_0_expected_batch_sizes , process_1_expected_batch_sizes , ):
    """Assert each process observes the expected per-batch sizes.

    Fixes the original: five `__snake_case` parameters (SyntaxError); names
    now match the keyword arguments used by the calling tests, and the def
    name matches the `verify_dataloader_batch_sizes` call sites.
    """
    dl = create_dataloader(accelerator=accelerator , dataset_size=dataset_size , batch_size=batch_size )
    batch_sizes = [len(batch[0] ) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
def UpperCAmelCase_ ( ) -> Tuple:
    """By default `even_batches=True` pads so every process sees equal batches."""
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def UpperCAmelCase_ ( ) -> Optional[int]:
    """With `even_batches=False`, the last process may receive fewer/smaller batches."""
    accelerator = create_accelerator(even_batches=False )
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
    verify_dataloader_batch_sizes(
        accelerator , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def UpperCAmelCase_ ( ) -> Dict:
    """`join_uneven_inputs` lets training finish when processes have uneven batch counts."""
    accelerator = create_accelerator(even_batches=False )
    model = torch.nn.Linear(1 , 1 )
    ddp_model = accelerator.prepare(model )
    dl = create_dataloader(accelerator , dataset_size=3 , batch_size=1 )
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model] ):
        for batch_idx, batch in enumerate(dl ):
            output = ddp_model(batch[0].float() )
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx )
    accelerator.wait_for_everyone()
    # With 3 samples over 2 processes, rank 0 sees two batches and rank 1 one.
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def UpperCAmelCase_ ( accelerator ) -> Union[str, Any]:
    """Joining over a non-DDP object should emit a UserWarning, not fail."""
    with warnings.catch_warnings(record=True ) as w:
        with accelerator.join_uneven_inputs([Mock()] ):
            pass
        assert issubclass(w[-1].category , UserWarning )
        assert "only supported for multi-GPU" in str(w[-1].message )
def UpperCAmelCase_ ( ) -> Any:
    """Check that ``join_uneven_inputs(..., even_batches=...)`` temporarily
    overrides the batch samplers' ``even_batches`` on both dataloaders and
    restores the default afterwards.

    NOTE(review): mangled bindings — the first two assignments are evidently
    the intended ``default_even_batches``/``overridden_even_batches`` flags,
    and ``__snake_case`` stands in for several different values; confirm.
    """
    _lowercase =True
    _lowercase =False
    _lowercase =create_accelerator(even_batches=__snake_case )
    _lowercase =torch.nn.Linear(1 , 1 )
    _lowercase =accelerator.prepare(__snake_case )
    _lowercase =create_dataloader(__snake_case , dataset_size=3 , batch_size=1 )
    _lowercase =create_dataloader(__snake_case , dataset_size=3 , batch_size=1 )
    with accelerator.join_uneven_inputs([ddp_model] , even_batches=__snake_case ):
        _lowercase =train_dl.batch_sampler.even_batches
        _lowercase =valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def UpperCAmelCase_ ( ) -> int:
    """Check the override still works when one dataloader is iterable (no
    batch sampler) and one is map-style: the iterable one must not raise.

    NOTE(review): mangled bindings as above; ``iterable=__snake_case`` is
    presumably ``iterable=True``; confirm before running.
    """
    _lowercase =True
    _lowercase =False
    _lowercase =create_accelerator(even_batches=__snake_case )
    _lowercase =torch.nn.Linear(1 , 1 )
    _lowercase =accelerator.prepare(__snake_case )
    create_dataloader(__snake_case , dataset_size=3 , batch_size=1 , iterable=__snake_case )
    _lowercase =create_dataloader(__snake_case , dataset_size=3 , batch_size=1 )
    with warnings.catch_warnings():
        warnings.filterwarnings('''ignore''' )
        try:
            with accelerator.join_uneven_inputs([ddp_model] , even_batches=__snake_case ):
                _lowercase =batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def UpperCAmelCase_ ( ) -> int:
    """Check that overriding ``even_batches`` on a purely iterable dataloader
    raises a 'only supported for map-style datasets' warning.

    NOTE(review): mangled bindings — ``__snake_case`` stands in for several
    different values (accelerator, model, flags); confirm against upstream.
    """
    _lowercase =create_accelerator()
    _lowercase =torch.nn.Linear(1 , 1 )
    _lowercase =accelerator.prepare(__snake_case )
    create_dataloader(__snake_case , dataset_size=3 , batch_size=1 , iterable=__snake_case )
    with warnings.catch_warnings(record=__snake_case ) as w:
        with accelerator.join_uneven_inputs([ddp_model] , even_batches=__snake_case ):
            pass
    assert issubclass(w[-1].category , __snake_case )
    assert "only supported for map-style datasets" in str(w[-1].message )
def UpperCAmelCase_ ( ) -> List[Any]:
    """Driver that runs every even-batches/join test in sequence, temporarily
    forcing ``DistributedType.FSDP`` for the non-DDP warning test.

    NOTE(review): the ``test_*`` functions invoked below are not defined
    under those names in this mangled file (all defs were renamed
    ``UpperCAmelCase_``), and ``original_state`` is never bound (mangled to
    ``_lowercase``); confirm against the upstream script.
    """
    _lowercase =create_accelerator()
    accelerator.print('''Test that even_batches variable ensures uniform batches across processes''' )
    test_default_ensures_even_batch_sizes()
    accelerator.print('''Run tests with even_batches disabled''' )
    test_can_disable_even_batches()
    accelerator.print('''Test joining uneven inputs''' )
    test_can_join_uneven_inputs()
    accelerator.print('''Test overriding even_batches when joining uneven inputs''' )
    test_join_can_override_even_batches()
    accelerator.print('''Test overriding even_batches for mixed dataloader types''' )
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print('''Test overriding even_batches raises a warning for iterable dataloaders''' )
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print('''Test join with non DDP distributed raises warning''' )
    _lowercase =accelerator.state.distributed_type
    _lowercase =DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(__snake_case )
    _lowercase =original_state
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined in this mangled file — the driver
    # above is named ``UpperCAmelCase_``; confirm the intended entry point.
    main()
| 5 |
import requests
from bsa import BeautifulSoup
def UpperCAmelCase_ ( __snake_case = "https://www.worldometers.info/coronavirus" ) -> dict:
    """Scrape the worldometers coronavirus page and return the global stats.

    Args:
        __snake_case: URL of the worldometers coronavirus page.

    Returns:
        dict mapping each heading/panel title to its displayed number, both
        stripped of surrounding whitespace.
    """
    # Fix over the mangled original: every result was assigned to a throwaway
    # ``_lowercase`` while later lines read ``soup``/``keys``/``values``
    # (undefined), and the final zip paired two copies of the URL parameter.
    soup = BeautifulSoup(requests.get(__snake_case ).text , '''html.parser''' )
    keys = soup.findAll('''h1''' )
    values = soup.findAll('''div''' , {'''class''': '''maincounter-number'''} )
    keys += soup.findAll('''span''' , {'''class''': '''panel-title'''} )
    values += soup.findAll('''div''' , {'''class''': '''number-table-main'''} )
    # Page order keeps each label aligned with its counter.
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
    # Print the scraped stats with an ANSI-bold header.
    # NOTE(review): ``world_covidaa_stats`` is not defined in this mangled
    # file — the scraper above is named ``UpperCAmelCase_``; confirm the
    # intended function name before running.
    print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
    for key, value in world_covidaa_stats().items():
        print(f'''{key}\n{value}\n''')
| 5 | 1 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger for the MusicGen conversion script.  The conversion
# function below reads it as ``logger``; the mangled assignment bound
# ``UpperCAmelCase__`` instead, leaving ``logger`` undefined at the use site.
logger = logging.get_logger(__name__)
# Decoder keys that are legitimately absent from the fairseq state dict.
# The conversion function reads this as ``EXPECTED_MISSING_KEYS``; the
# mangled ``UpperCAmelCase__`` binding left that name undefined.
EXPECTED_MISSING_KEYS = ['''model.decoder.embed_positions.weights''']
def UpperCAmelCase_ ( name ) -> str:
    """Map a fairseq MusicGen decoder parameter name to the transformers name.

    Applies a chain of substring replacements; a name may be rewritten by
    several rules in sequence (e.g. ``transformer`` then ``linear1``).

    Args:
        name: fairseq parameter name (the mangled original declared the
            parameter ``__snake_case`` while the body reads ``name``).

    Returns:
        The renamed parameter string.
    """
    # Fix over the mangled original: each ``name.replace(...)`` result was
    # assigned to a throwaway ``_lowercase``, so every rewrite was discarded
    # and the function returned the input unchanged.
    if "emb" in name:
        name = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
    if "transformer" in name:
        name = name.replace('''transformer''' , '''model.decoder''' )
    if "cross_attention" in name:
        name = name.replace('''cross_attention''' , '''encoder_attn''' )
    if "linear1" in name:
        name = name.replace('''linear1''' , '''fc1''' )
    if "linear2" in name:
        name = name.replace('''linear2''' , '''fc2''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''self_attn_layer_norm''' )
    if "norm_cross" in name:
        name = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''final_layer_norm''' )
    if "out_norm" in name:
        name = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
    if "linears" in name:
        name = name.replace('''linears''' , '''lm_heads''' )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
    return name
def UpperCAmelCase_ ( __snake_case , hidden_size ) -> Tuple[Dict, Dict]:
    """Rename a fairseq MusicGen decoder state dict for transformers loading.

    Fused attention ``in_proj_weight`` tensors are split into separate
    q/k/v projection weights; ``enc_to_dec_proj`` tensors are collected into
    a second dict to be loaded onto the composite model later.

    Args:
        __snake_case: the fairseq decoder state dict (mutated and returned).
        hidden_size: decoder hidden size, used to split the fused qkv weight.

    Returns:
        ``(state_dict, enc_dec_proj_state_dict)``.
    """
    # Fixes over the mangled original: the second parameter was a duplicate
    # ``__snake_case`` (a SyntaxError) while the body and the caller use
    # ``hidden_size``, and the renamed/split tensors were assigned to
    # throwaway ``_lowercase`` names instead of being written into the dicts.
    # NOTE(review): ``rename_keys`` is not defined under that name in this
    # mangled file (the helper above is ``UpperCAmelCase_``); confirm.
    state_dict = __snake_case
    enc_dec_proj_state_dict = {}
    for key in list(state_dict.keys() ):
        val = state_dict.pop(key )
        key = rename_keys(key )
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace('''in_proj_weight''' , '''q_proj.weight''' )] = val[:hidden_size, :]
            state_dict[key.replace('''in_proj_weight''' , '''k_proj.weight''' )] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace('''in_proj_weight''' , '''v_proj.weight''' )] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len('''enc_to_dec_proj.''' ) :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def UpperCAmelCase_ ( checkpoint ) -> "MusicgenDecoderConfig":
    """Build a ``MusicgenDecoderConfig`` matching a named checkpoint size.

    Args:
        checkpoint: one of ``"small"``, ``"medium"`` or ``"large"`` (the
            mangled original declared the parameter ``__snake_case`` while
            the body reads ``checkpoint``).

    Returns:
        A ``MusicgenDecoderConfig`` with matching hidden size, FFN dim,
        layer count and head count.

    Raises:
        ValueError: for any other checkpoint name.
    """
    # Fix over the mangled original: every size was bound to a throwaway
    # ``_lowercase`` and the config call read undefined names.
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config
@torch.no_grad()
def UpperCAmelCase_ ( __snake_case , __snake_case=None , __snake_case=None , __snake_case="cpu" ) -> Optional[Any]:
    """Convert a fairseq MusicGen checkpoint into a transformers
    ``MusicgenForConditionalGeneration`` + processor, sanity-check a forward
    pass, and optionally save locally and/or push to the Hub.

    NOTE(review): this file is machine-mangled — the signature repeats
    ``__snake_case`` (a SyntaxError in real Python), every local binding was
    rewritten to ``_lowercase`` while later statements read the original
    names (``fairseq_model``, ``decoder_config``, ``decoder``, ``model``,
    ``audio_encoder``, ``logits`` ...), and ``rename_state_dict``/``logger``
    are not defined under those names here.  Confirm against the upstream
    conversion script before running.
    """
    _lowercase =MusicGen.get_pretrained(__snake_case , device=__snake_case )
    _lowercase =decoder_config_from_checkpoint(__snake_case )
    _lowercase =fairseq_model.lm.state_dict()
    _lowercase , _lowercase =rename_state_dict(
        __snake_case , hidden_size=decoder_config.hidden_size )
    _lowercase =TaEncoderModel.from_pretrained('''t5-base''' )
    _lowercase =EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
    _lowercase =MusicgenForCausalLM(__snake_case ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    _lowercase , _lowercase =decoder.load_state_dict(__snake_case , strict=__snake_case )
    for key in missing_keys.copy():
        if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(__snake_case )
    if len(__snake_case ) > 0:
        raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" )
    if len(__snake_case ) > 0:
        raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" )
    # init the composite model
    _lowercase =MusicgenForConditionalGeneration(text_encoder=__snake_case , audio_encoder=__snake_case , decoder=__snake_case )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(__snake_case )
    # check we can do a forward pass
    _lowercase =torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    _lowercase =input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        _lowercase =model(input_ids=__snake_case , decoder_input_ids=__snake_case ).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError('''Incorrect shape for logits''' )
    # now construct the processor
    _lowercase =AutoTokenizer.from_pretrained('''t5-base''' )
    _lowercase =AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
    _lowercase =MusicgenProcessor(feature_extractor=__snake_case , tokenizer=__snake_case )
    # set the appropriate bos/pad token ids
    _lowercase =2048
    _lowercase =2048
    # set other default generation config params
    _lowercase =int(30 * audio_encoder.config.frame_rate )
    _lowercase =True
    _lowercase =3.0
    if pytorch_dump_folder is not None:
        Path(__snake_case ).mkdir(exist_ok=__snake_case )
        logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" )
        model.save_pretrained(__snake_case )
        processor.save_pretrained(__snake_case )
    if repo_id:
        logger.info(F"Pushing model {checkpoint} to {repo_id}" )
        model.push_to_hub(__snake_case )
        processor.push_to_hub(__snake_case )
if __name__ == "__main__":
    # Command-line entry point for the MusicGen checkpoint conversion.
    # NOTE(review): mangled bindings — the parser is assigned to
    # ``UpperCAmelCase__`` but used as ``parser``, parsed args likewise used
    # as ``args``, ``convert_musicgen_checkpoint`` is not defined under that
    # name here, and the final call omits ``args.device``.  Confirm against
    # the upstream script.
    UpperCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--checkpoint''',
        default='''small''',
        type=str,
        help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder''',
        required=True,
        default=None,
        type=str,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument(
        '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
    )
    parser.add_argument(
        '''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
    )
    UpperCAmelCase__ = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 5 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
# Lazy import structure for this subpackage: submodule name -> exported names.
# Fix over the mangled original: the dict was bound to ``UpperCAmelCase__``
# while ``_LazyModule`` below reads ``_import_structure``, and the lazy
# module instance must be installed into ``sys.modules[__name__]`` (the
# standard transformers lazy-module idiom) rather than a throwaway name.
_import_structure = {
    '''config''': [
        '''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
        '''OnnxConfig''',
        '''OnnxConfigWithPast''',
        '''OnnxSeq2SeqConfigWithPast''',
        '''PatchingSpec''',
    ],
    '''convert''': ['''export''', '''validate_model_outputs'''],
    '''features''': ['''FeaturesManager'''],
    '''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
    # Direct imports for static type checkers; the name below is fixed from
    # the mangled ``OnnxSeqaSeqConfigWithPast`` to match the export table.
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
UpperCAmelCase__ = logging.get_logger(__name__)
class lowerCamelCase__ ( lowerCAmelCase):
    """Padding/truncation base class for sequence feature extractors
    (pad/truncate batches of ``input_values``, manage attention masks,
    resolve padding strategies).

    NOTE(review): this file is machine-mangled — every local/attribute
    assignment was rewritten to ``_lowercase`` while later statements read
    the intended names (``processed_features``, ``required_input``,
    ``padding_strategy`` ...), most signatures repeat the parameter name
    ``UpperCAmelCase`` (a SyntaxError in real Python), and the four
    non-init methods all share the name ``__A`` so each definition shadows
    the previous one.  The docstrings below record the evident intent;
    confirm against the upstream implementation before use.
    """
    def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]:
        """Intended: store feature_size, sampling_rate, padding_value,
        padding_side (default 'right') and return_attention_mask on self."""
        _lowercase =feature_size
        _lowercase =sampling_rate
        _lowercase =padding_value
        _lowercase =kwargs.pop('''padding_side''' , '''right''' )
        _lowercase =kwargs.pop('''return_attention_mask''' , UpperCAmelCase )
        super().__init__(**UpperCAmelCase )
    def __A (self , UpperCAmelCase , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , ) -> BatchFeature:
        """Intended: ``pad`` — normalize inputs to a dict of lists, coerce
        tensors to numpy, truncate then pad every example, and return a
        ``BatchFeature`` in the requested tensor type."""
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(UpperCAmelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
            _lowercase ={
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }
        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                '''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'''
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys() )}" )
        _lowercase =processed_features[self.model_input_names[0]]
        _lowercase =(
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )
        if len(UpperCAmelCase ) == 0:
            if return_attention_mask:
                _lowercase =[]
            return processed_features
        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        _lowercase =required_input[0]
        if isinstance(UpperCAmelCase , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            _lowercase =0
            while len(required_input[index] ) == 0:
                index += 1
            if index < len(UpperCAmelCase ):
                _lowercase =required_input[index][0]
        if return_tensors is None:
            if is_tf_tensor(UpperCAmelCase ):
                _lowercase ='''tf'''
            elif is_torch_tensor(UpperCAmelCase ):
                _lowercase ='''pt'''
            elif isinstance(UpperCAmelCase , (int, float, list, tuple, np.ndarray) ):
                _lowercase ='''np'''
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(UpperCAmelCase )}. "
                    '''Should be one of a python, numpy, pytorch or tensorflow object.''' )
        for key, value in processed_features.items():
            if isinstance(value[0] , (int, float) ):
                _lowercase =to_numpy(UpperCAmelCase )
            else:
                _lowercase =[to_numpy(UpperCAmelCase ) for v in value]
        # Convert padding_strategy in PaddingStrategy
        _lowercase =self._get_padding_strategies(padding=UpperCAmelCase , max_length=UpperCAmelCase )
        _lowercase =processed_features[self.model_input_names[0]]
        _lowercase =len(UpperCAmelCase )
        if not all(len(UpperCAmelCase ) == batch_size for v in processed_features.values() ):
            raise ValueError('''Some items in the output dictionary have a different batch size than others.''' )
        _lowercase =[]
        for i in range(UpperCAmelCase ):
            _lowercase ={k: v[i] for k, v in processed_features.items()}
            # truncation
            _lowercase =self._truncate(
                UpperCAmelCase , max_length=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , truncation=UpperCAmelCase , )
            truncated_inputs.append(UpperCAmelCase )
        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            _lowercase =max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
            _lowercase =PaddingStrategy.MAX_LENGTH
        _lowercase ={}
        for i in range(UpperCAmelCase ):
            # padding
            _lowercase =self._pad(
                truncated_inputs[i] , max_length=UpperCAmelCase , padding_strategy=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    _lowercase =[]
                if value.dtype is np.dtype(np.floataa ):
                    _lowercase =value.astype(np.floataa )
                batch_outputs[key].append(UpperCAmelCase )
        return BatchFeature(UpperCAmelCase , tensor_type=UpperCAmelCase )
    def __A (self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase = None , UpperCAmelCase = None , ) -> dict:
        """Intended: ``_pad`` — pad one example (and its attention mask) up to
        ``max_length`` on ``self.padding_side`` with ``self.padding_value``."""
        _lowercase =processed_features[self.model_input_names[0]]
        if padding_strategy == PaddingStrategy.LONGEST:
            _lowercase =len(UpperCAmelCase )
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            _lowercase =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        _lowercase =padding_strategy != PaddingStrategy.DO_NOT_PAD and len(UpperCAmelCase ) < max_length
        if return_attention_mask and "attention_mask" not in processed_features:
            _lowercase =np.ones(len(UpperCAmelCase ) , dtype=np.intaa )
        if needs_to_be_padded:
            _lowercase =max_length - len(UpperCAmelCase )
            if self.padding_side == "right":
                if return_attention_mask:
                    _lowercase =np.pad(
                        processed_features['''attention_mask'''] , (0, difference) )
                _lowercase =((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                _lowercase =np.pad(
                    UpperCAmelCase , UpperCAmelCase , '''constant''' , constant_values=self.padding_value )
            elif self.padding_side == "left":
                if return_attention_mask:
                    _lowercase =np.pad(
                        processed_features['''attention_mask'''] , (difference, 0) )
                _lowercase =((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                _lowercase =np.pad(
                    UpperCAmelCase , UpperCAmelCase , '''constant''' , constant_values=self.padding_value )
            else:
                raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
        return processed_features
    def __A (self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , ) -> List[str]:
        """Intended: ``_truncate`` — cut one example (and its attention mask)
        down to ``max_length`` (rounded up to ``pad_to_multiple_of``)."""
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' )
        _lowercase =processed_features[self.model_input_names[0]]
        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            _lowercase =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        _lowercase =len(UpperCAmelCase ) > max_length
        if needs_to_be_truncated:
            _lowercase =processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                _lowercase =processed_features['''attention_mask'''][:max_length]
        return processed_features
    def __A (self , UpperCAmelCase=False , UpperCAmelCase=None ) -> Dict:
        """Intended: ``_get_padding_strategies`` — normalize the ``padding``
        argument (bool/str/enum) to a ``PaddingStrategy`` and validate that a
        padding value / max_length exists when needed."""
        # Get padding strategy
        if padding is not False:
            if padding is True:
                _lowercase =PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(UpperCAmelCase , UpperCAmelCase ):
                _lowercase =PaddingStrategy(UpperCAmelCase )
            elif isinstance(UpperCAmelCase , UpperCAmelCase ):
                _lowercase =padding
        else:
            _lowercase =PaddingStrategy.DO_NOT_PAD
        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" )
        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                '''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'''
                ''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' )
        return padding_strategy
| 5 |
def UpperCAmelCase_ ( __snake_case ) -> str:
"""simple docstring"""
_lowercase =0
# if input_string is "aba" than new_input_string become "a|b|a"
_lowercase =''''''
_lowercase =''''''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(__snake_case ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
_lowercase , _lowercase =0, 0
# length[i] shows the length of palindromic substring with center i
_lowercase =[1 for i in range(len(__snake_case ) )]
# for each character in new_string find corresponding palindromic string
_lowercase =0
for j in range(len(__snake_case ) ):
_lowercase =1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(__snake_case )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_lowercase =2 * k - 1
# does this string is ending after the previously explored end (that is r) ?
# if yes the update the new r to the last index of this
if j + k - 1 > r:
_lowercase =j - k + 1 # noqa: E741
_lowercase =j + k - 1
# update max_length and start position
if max_length < length[j]:
_lowercase =length[j]
_lowercase =j
# create that string
_lowercase =new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
    # Run the module's doctests (none are visible in this chunk, so this is
    # currently a no-op beyond importing doctest).
    import doctest
    doctest.testmod()
| 5 | 1 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
# Module-level logger for the UniSpeech-SAT conversion script.  The helpers
# below read it as ``logger``; the mangled assignment bound
# ``UpperCAmelCase__`` instead, leaving ``logger`` undefined at use sites.
logger = logging.get_logger(__name__)
# fairseq -> transformers parameter-name mapping used by the weight loader.
# Fix over the mangled original: both constants were bound to the same name
# ``UpperCAmelCase__`` (so the list silently overwrote the dict) while the
# loader below reads them as ``MAPPING`` and ``TOP_LEVEL_KEYS``.
MAPPING = {
    '''post_extract_proj''': '''feature_projection.projection''',
    '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
    '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
    '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
    '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
    '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
    '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
    '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
    '''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
    '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
    '''encoder.layer_norm''': '''encoder.layer_norm''',
    '''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
    '''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
    '''quantizer.weight_proj''': '''quantizer.weight_proj''',
    '''quantizer.vars''': '''quantizer.codevectors''',
    '''project_q''': '''project_q''',
    '''final_proj''': '''project_hid''',
    '''w2v_encoder.proj''': '''lm_head''',
    '''label_embs_concat''': '''label_embeddings_concat''',
    '''mask_emb''': '''masked_spec_embed''',
    '''spk_proj''': '''speaker_proj''',
}
# Keys that live at the top level of the HF model rather than under the
# ``unispeech_sat.`` prefix.
TOP_LEVEL_KEYS = [
    '''lm_head''',
    '''quantizer.weight_proj''',
    '''quantizer.codevectors''',
    '''project_q''',
    '''project_hid''',
    '''label_embeddings_concat''',
    '''speaker_proj''',
    '''layer_norm_for_extract''',
]
def UpperCAmelCase_ ( hf_pointer , key , value , full_name , weight_type ) -> None:
    """Assign ``value`` to the submodule of ``hf_pointer`` addressed by the
    dotted ``key``, after checking shapes.

    Args:
        hf_pointer: root HF module to walk into.
        key: dotted attribute path (e.g. ``encoder.layers.0.attention.k_proj``).
        value: tensor to copy in.
        full_name: original fairseq name, used in error/log messages.
        weight_type: one of ``weight``/``weight_g``/``weight_v``/``bias`` or
            ``None`` to assign ``hf_pointer.data`` directly.

    Raises:
        ValueError: when the target shape does not match ``value.shape``.
    """
    # Fix over the mangled original: all five parameters were declared
    # ``__snake_case`` (duplicate parameter names are a SyntaxError) while
    # the body reads the names above, and the attribute walk / shape lookups
    # were bound to throwaway ``_lowercase`` names.
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            F" {value.shape} for {full_name}" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> List[Any]:
    """Copy every weight from the fairseq model's state dict into the HF
    UniSpeech-SAT model: conv feature-extractor layers go through
    ``load_conv_layer``, everything else is matched against ``MAPPING`` and
    assigned with ``set_recursively``; unmatched names are logged.

    NOTE(review): mangled — the two parameters share the name
    ``__snake_case`` (a SyntaxError in real Python), locals like
    ``unused_weights``/``fairseq_dict``/``is_used`` are read but bound as
    ``_lowercase``, and ``load_conv_layer``/``set_recursively`` are not
    defined under those names in this file.  Confirm against the upstream
    conversion script before running.
    """
    _lowercase =[]
    _lowercase =fairseq_model.state_dict()
    _lowercase =hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        _lowercase =False
        if "conv_layers" in name:
            load_conv_layer(
                __snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == '''group''' , )
            _lowercase =True
        else:
            for key, mapped_key in MAPPING.items():
                _lowercase ='''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    _lowercase =True
                    if "*" in mapped_key:
                        _lowercase =name.split(__snake_case )[0].split('''.''' )[-2]
                        _lowercase =mapped_key.replace('''*''' , __snake_case )
                    if "weight_g" in name:
                        _lowercase ='''weight_g'''
                    elif "weight_v" in name:
                        _lowercase ='''weight_v'''
                    elif "bias" in name:
                        _lowercase ='''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        _lowercase ='''weight'''
                    else:
                        _lowercase =None
                    set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
                    continue
            if not is_used:
                unused_weights.append(__snake_case )
    logger.warning(F"Unused weights: {unused_weights}" )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) -> int:
    """Copy one fairseq conv-feature-extractor tensor into the matching HF
    conv layer (type 0 = conv weight/bias, type 2 = layer norm), collecting
    anything unrecognized into the unused-weights list.

    NOTE(review): mangled — the five parameters share the name
    ``__snake_case`` (a SyntaxError in real Python) while the body reads
    ``full_name``/``value``/``feature_extractor``/``unused_weights``/
    ``use_group_norm``, and the parsed ``items``/``layer_id``/``type_id``
    are read but bound as ``_lowercase``.  Confirm against the upstream
    conversion script before running.
    """
    _lowercase =full_name.split('''conv_layers.''' )[-1]
    _lowercase =name.split('''.''' )
    _lowercase =int(items[0] )
    _lowercase =int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
            _lowercase =value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
            _lowercase =value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found." )
            _lowercase =value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." )
            _lowercase =value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(__snake_case )
@torch.no_grad()
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case=None , __snake_case=None , __snake_case=True ) -> Any:
    """Load a fairseq UniSpeech-SAT checkpoint, build the matching HF model
    (CTC head if fine-tuned, pre-training head otherwise), copy the weights
    across and save the result with ``save_pretrained``.

    NOTE(review): mangled — the signature repeats ``__snake_case`` (a
    SyntaxError in real Python), locals (``config_path``, ``is_finetuned``,
    ``checkpoint_path``, ``dict_path``, ``model``, ``hf_wavavec``) are read
    but bound as ``_lowercase``, the fairseq loader returns three values but
    only two targets are unpacked, and ``recursively_load_weights`` is not
    defined under that name here.  Confirm against the upstream script.
    """
    if config_path is not None:
        _lowercase =UniSpeechSatConfig.from_pretrained(__snake_case )
    else:
        _lowercase =UniSpeechSatConfig()
    _lowercase =''''''
    if is_finetuned:
        _lowercase =UniSpeechSatForCTC(__snake_case )
    else:
        _lowercase =UniSpeechSatForPreTraining(__snake_case )
    _lowercase , _lowercase =fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    _lowercase =model[0].eval()
    recursively_load_weights(__snake_case , __snake_case )
    hf_wavavec.save_pretrained(__snake_case )
if __name__ == "__main__":
    # Command-line entry point for the UniSpeech-SAT conversion.
    # NOTE(review): mangled bindings — the parser is assigned to
    # ``UpperCAmelCase__`` but used as ``parser``, parsed args likewise used
    # as ``args``, and ``convert_unispeech_sat_checkpoint`` is not defined
    # under that name here.  Confirm against the upstream script.
    UpperCAmelCase__ = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
    )
    UpperCAmelCase__ = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 5 |
from math import isqrt
def UpperCAmelCase_ ( max_number ) -> list[int]:
    """Return all primes strictly below ``max_number`` (sieve of Eratosthenes).

    Args:
        max_number: exclusive upper bound (the mangled original declared the
            parameter ``__snake_case`` while the body reads ``max_number``).

    Returns:
        Sorted list of primes ``p`` with ``2 <= p < max_number``.
    """
    if max_number < 2:
        # guard: the original would call isqrt(-1) for max_number == 0
        return []
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            # fix: the mangled original stepped the range by ``max_number``
            # (marking only i**2); the sieve must step by ``i`` so every
            # multiple of i gets cleared
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def UpperCAmelCase_ ( max_number = 10**8 ) -> int:
    """Count composites below ``max_number`` with exactly two prime factors
    (semiprimes, counted with multiplicity — Project Euler 187 style).

    Args:
        max_number: exclusive upper bound (the mangled original declared the
            parameter ``__snake_case`` while the body reads ``max_number``).

    Returns:
        Number of integers ``n < max_number`` that are a product of two primes.
    """
    # Fix over the mangled original: locals were bound to throwaway
    # ``_lowercase`` names while the loop reads ``prime_numbers``/``left``/
    # ``right``/``semiprimes_count``.
    # NOTE(review): ``calculate_prime_numbers`` is not defined under that
    # name in this mangled file (the sieve above is ``UpperCAmelCase_``).
    prime_numbers = calculate_prime_numbers(max_number // 2 )
    # two-pointer scan: for each smaller factor prime_numbers[left], count
    # the larger factors whose product stays below max_number
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined in this mangled file — the
    # function above is named ``UpperCAmelCase_``; confirm the entry point.
    print(f'''{solution() = }''')
| 5 | 1 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
# Generic type aliases (a nested-data / path typing module).
# NOTE(review): mangled — every alias was bound to the same name
# ``UpperCAmelCase__`` so each assignment overwrites the previous one, and
# the two Union aliases read ``T`` which the first assignment no longer
# binds.  The original alias names are not recoverable from this chunk.
UpperCAmelCase__ = TypeVar('''T''')
UpperCAmelCase__ = Union[List[T], Tuple[T, ...]]
UpperCAmelCase__ = Union[T, List[T], Dict[str, T]]
UpperCAmelCase__ = Union[str, bytes, os.PathLike]
| 5 |
# Character -> International Morse Code table (space maps to "/").
# Fix over the mangled original: the table was bound to ``UpperCAmelCase__``
# while the reverse-dict comprehension below and the encode helper read it
# as ``MORSE_CODE_DICT``, leaving that name undefined.
MORSE_CODE_DICT = {
    '''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
    '''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
    '''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
    '''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
    '''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
    '''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
    ''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
    '''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
    '''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
# Inverse table used for decoding (Morse codes are unique, so this is safe).
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCAmelCase_ ( message ) -> str:
    """Encode ``message`` to Morse, separating symbols with single spaces.

    Every uppercased character must appear in ``MORSE_CODE_DICT`` (spaces
    encode as ``/``).  Fix over the mangled original: the parameter was
    declared ``__snake_case`` while the body reads ``message``.
    """
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def UpperCAmelCase_ ( message ) -> str:
    """Decode a space-separated Morse string back to text via REVERSE_DICT.

    Fix over the mangled original: the parameter was declared
    ``__snake_case`` while the body reads ``message``.
    """
    return "".join(REVERSE_DICT[char] for char in message.split() )
def UpperCAmelCase_ ( ) -> None:
    """Demo: print a message, its Morse encoding, and the round-trip decode.

    Fix over the mangled original: every print argument was the undefined
    ``__snake_case`` and the intermediate results were bound to throwaway
    ``_lowercase`` names.
    NOTE(review): ``encrypt``/``decrypt`` are not defined under those names
    in this mangled file (both helpers above are named ``UpperCAmelCase_``);
    confirm the intended helpers before running.
    """
    message = '''Morse code here!'''
    print(message )
    message = encrypt(message )
    print(message )
    message = decrypt(message )
    print(message )
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined in this mangled file — the demo
    # above is named ``UpperCAmelCase_``; confirm the intended entry point.
    main()
| 5 | 1 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
UpperCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(
    lowerCAmelCase , r'''
        top_k (`int`, defaults to 5):
        The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
        When passed, the model will limit the scores to the passed targets instead of looking up in the whole
        vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
        token will be used (with a warning, and that might be slower).
''' , )
class lowerCamelCase__ ( lowerCAmelCase):
    """Fill-mask pipeline: predicts the token(s) hidden behind the tokenizer's mask token.

    NOTE(review): several signatures below repeat the parameter name
    ``UpperCAmelCase`` (a SyntaxError in Python) and bodies read names such as
    ``input_ids``/``masked_index`` that were mangled away from the parameters —
    this looks like a mechanically renamed copy of the original pipeline;
    confirm against the upstream implementation before running.
    """

    def __A (self , UpperCAmelCase ) -> np.ndarray:
        # Locate the position(s) of the mask token in the input ids, per framework.
        if self.framework == "tf":
            _lowercase =tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
        elif self.framework == "pt":
            _lowercase =torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=UpperCAmelCase )
        else:
            raise ValueError('''Unsupported framework''' )
        return masked_index

    def __A (self , UpperCAmelCase ) -> np.ndarray:
        # Raise a PipelineException when the input contains no mask token at all.
        _lowercase =self.get_masked_index(UpperCAmelCase )
        _lowercase =np.prod(masked_index.shape )
        if numel < 1:
            raise PipelineException(
                '''fill-mask''' , self.model.base_model_prefix , f"No mask_token ({self.tokenizer.mask_token}) found on the input" , )

    def __A (self , UpperCAmelCase ) -> List[Any]:
        # Validate every sample of a (possibly batched) model input.
        if isinstance(UpperCAmelCase , UpperCAmelCase ):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(UpperCAmelCase )

    def __A (self , UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ) -> Dict[str, GenericTensor]:
        # preprocess: tokenize the text and check a mask token is present.
        if return_tensors is None:
            _lowercase =self.framework
        _lowercase =self.tokenizer(UpperCAmelCase , return_tensors=UpperCAmelCase )
        self.ensure_exactly_one_mask_token(UpperCAmelCase )
        return model_inputs

    def __A (self , UpperCAmelCase ) -> Union[str, Any]:
        # forward: run the model and keep the input ids alongside the outputs.
        _lowercase =self.model(**UpperCAmelCase )
        _lowercase =model_inputs['''input_ids''']
        return model_outputs

    def __A (self , UpperCAmelCase , UpperCAmelCase=5 , UpperCAmelCase=None ) -> Tuple:
        # postprocess: softmax the logits at the mask position(s) and return the
        # ``top_k`` candidates (optionally restricted to ``target_ids``).
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            _lowercase =target_ids.shape[0]
        _lowercase =model_outputs['''input_ids'''][0]
        _lowercase =model_outputs['''logits''']
        if self.framework == "tf":
            _lowercase =tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
            _lowercase =outputs.numpy()
            _lowercase =outputs[0, masked_index, :]
            _lowercase =stable_softmax(UpperCAmelCase , axis=-1 )
            if target_ids is not None:
                _lowercase =tf.gather_nd(tf.squeeze(UpperCAmelCase , 0 ) , target_ids.reshape(-1 , 1 ) )
                _lowercase =tf.expand_dims(UpperCAmelCase , 0 )
            _lowercase =tf.math.top_k(UpperCAmelCase , k=UpperCAmelCase )
            _lowercase , _lowercase =topk.values.numpy(), topk.indices.numpy()
        else:
            _lowercase =torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=UpperCAmelCase ).squeeze(-1 )
            # Fill mask pipeline supports only one ${mask_token} per sample
            _lowercase =outputs[0, masked_index, :]
            _lowercase =logits.softmax(dim=-1 )
            if target_ids is not None:
                _lowercase =probs[..., target_ids]
            _lowercase , _lowercase =probs.topk(UpperCAmelCase )
        _lowercase =[]
        _lowercase =values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
            _lowercase =[]
            for v, p in zip(_values , _predictions ):
                # Copy is important since we're going to modify this array in place
                _lowercase =input_ids.numpy().copy()
                if target_ids is not None:
                    _lowercase =target_ids[p].tolist()
                _lowercase =p
                # Filter padding out:
                _lowercase =tokens[np.where(tokens != self.tokenizer.pad_token_id )]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                _lowercase =self.tokenizer.decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
                _lowercase ={'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
                row.append(UpperCAmelCase )
            result.append(UpperCAmelCase )
        if single_mask:
            return result[0]
        return result

    def __A (self , UpperCAmelCase , UpperCAmelCase=None ) -> List[str]:
        # Resolve user-supplied target words to vocabulary ids; unknown words
        # are tokenized and their first sub-token is used (with a warning).
        if isinstance(UpperCAmelCase , UpperCAmelCase ):
            _lowercase =[targets]
        try:
            _lowercase =self.tokenizer.get_vocab()
        except Exception:
            _lowercase ={}
        _lowercase =[]
        for target in targets:
            _lowercase =vocab.get(UpperCAmelCase , UpperCAmelCase )
            if id_ is None:
                _lowercase =self.tokenizer(
                    UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , max_length=1 , truncation=UpperCAmelCase , )['''input_ids''']
                if len(UpperCAmelCase ) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        '''We cannot replace it with anything meaningful, ignoring it''' )
                    continue
                _lowercase =input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." )
            target_ids.append(id_ )
        _lowercase =list(set(UpperCAmelCase ) )
        if len(UpperCAmelCase ) == 0:
            raise ValueError('''At least one target must be provided when passed.''' )
        _lowercase =np.array(UpperCAmelCase )
        return target_ids

    def __A (self , UpperCAmelCase=None , UpperCAmelCase=None ) -> List[str]:
        # _sanitize_parameters: split kwargs into (preprocess, forward, postprocess)
        # parameter dicts; only postprocess options exist for this pipeline.
        _lowercase ={}
        if targets is not None:
            _lowercase =self.get_target_ids(UpperCAmelCase , UpperCAmelCase )
            _lowercase =target_ids
        if top_k is not None:
            _lowercase =top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                '''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
        return {}, {}, postprocess_params

    def __call__(self , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ) -> Any:
        # Unwrap the singleton list so a single input returns a single result.
        _lowercase =super().__call__(UpperCAmelCase , **UpperCAmelCase )
        if isinstance(UpperCAmelCase , UpperCAmelCase ) and len(UpperCAmelCase ) == 1:
            return outputs[0]
        return outputs
| 5 |
from typing import Any
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> list:
    """Viterbi algorithm: return the most probable hidden-state path for a
    sequence of observations in an HMM.

    NOTE(review): this is a mangled copy — the signature repeats ``__snake_case``
    five times (a SyntaxError), and the body reads names (``observation``,
    ``probabilities``, ``pointers`` …) that were renamed to ``_lowercase`` on
    assignment. The comments below describe the apparent original intent
    (observations_space, states_space, initial/transition/emission
    probabilities); confirm against the original before use.
    """
    _validation(
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
    # Creates data structures and fill initial step
    _lowercase ={}
    _lowercase ={}
    for state in states_space:
        _lowercase =observations_space[0]
        _lowercase =(
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        _lowercase =None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1 , len(__snake_case ) ):
        _lowercase =observations_space[o]
        _lowercase =observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            _lowercase =''''''
            _lowercase =-1
            for k_state in states_space:
                _lowercase =(
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    _lowercase =probability
                    _lowercase =k_state
            # Update probabilities and pointers dicts
            _lowercase =(
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            _lowercase =arg_max
    # The final observation
    _lowercase =observations_space[len(__snake_case ) - 1]
    # argmax for given final observation
    _lowercase =''''''
    _lowercase =-1
    for k_state in states_space:
        _lowercase =probabilities[(k_state, final_observation)]
        if probability > max_probability:
            _lowercase =probability
            _lowercase =k_state
    _lowercase =arg_max
    # Process pointers backwards
    _lowercase =last_state
    _lowercase =[]
    for o in range(len(__snake_case ) - 1 , -1 , -1 ):
        result.append(__snake_case )
        _lowercase =pointers[previous, observations_space[o]]
    result.reverse()
    return result
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> None:
    """Run all structural validations on the Viterbi inputs (non-empty, list
    shapes, dict shapes).

    NOTE(review): mangled signature — five identical parameter names is a
    SyntaxError, and ``_validation``/``_validate_*`` helper names are not
    defined in this module (every function here is named ``UpperCAmelCase_``).
    """
    _validate_not_empty(
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
    _validate_lists(__snake_case , __snake_case )
    _validate_dicts(
        __snake_case , __snake_case , __snake_case )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> None:
    """Raise ValueError if any of the five Viterbi inputs is empty/falsy.

    NOTE(review): mangled — the body reads the original parameter names
    (observations_space, states_space, …) which no longer exist in the
    (syntactically invalid) signature.
    """
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError('''There\'s an empty parameter''' )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> None:
    """Validate that the observations and states inputs are lists of strings.

    NOTE(review): duplicate parameter names make this a SyntaxError as written.
    """
    _validate_list(__snake_case , '''observations_space''' )
    _validate_list(__snake_case , '''states_space''' )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> None:
    """Validate that ``_object`` is a list of strings, naming it ``var_name``
    in the error message.

    NOTE(review): mangled — ``_object``/``var_name`` were the original
    parameter names; the current signature (duplicate ``__snake_case``) is a
    SyntaxError and does not bind them.
    """
    if not isinstance(_object , __snake_case ):
        _lowercase =F"{var_name} must be a list"
        raise ValueError(__snake_case )
    else:
        for x in _object:
            if not isinstance(__snake_case , __snake_case ):
                _lowercase =F"{var_name} must be a list of strings"
                raise ValueError(__snake_case )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , ) -> None:
    """Validate the three probability mappings: a flat dict of floats for the
    initial probabilities and nested dicts for transition/emission tables.

    NOTE(review): duplicate parameter names — SyntaxError as written.
    """
    _validate_dict(__snake_case , '''initial_probabilities''' , __snake_case )
    _validate_nested_dict(__snake_case , '''transition_probabilities''' )
    _validate_nested_dict(__snake_case , '''emission_probabilities''' )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> None:
    """Validate a dict-of-dicts: the outer mapping and each inner value dict.

    NOTE(review): mangled — ``_object`` is unbound and the duplicate parameter
    names are a SyntaxError; the inner ``_validate_dict`` call also passes four
    arguments where the original presumably forwarded (value, name, float, nested=True).
    """
    _validate_dict(_object , __snake_case , __snake_case )
    for x in _object.values():
        _validate_dict(__snake_case , __snake_case , __snake_case , __snake_case )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case = False ) -> None:
    """Validate that ``_object`` is a dict with string keys and values of the
    expected type, producing a descriptive ValueError otherwise.

    NOTE(review): mangled — ``_object``, ``var_name``, ``value_type`` and
    ``nested`` were the original parameter names; the duplicated
    ``__snake_case`` signature is a SyntaxError and binds none of them.
    """
    if not isinstance(_object , __snake_case ):
        _lowercase =F"{var_name} must be a dict"
        raise ValueError(__snake_case )
    if not all(isinstance(__snake_case , __snake_case ) for x in _object ):
        _lowercase =F"{var_name} all keys must be strings"
        raise ValueError(__snake_case )
    if not all(isinstance(__snake_case , __snake_case ) for x in _object.values() ):
        _lowercase ='''nested dictionary ''' if nested else ''''''
        _lowercase =F"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(__snake_case )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script (none are currently
    # present in the docstrings above).
    from doctest import testmod
    testmod()
| 5 | 1 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
UpperCAmelCase__ = logging.get_logger(__name__)
class lowerCamelCase__ ( lowerCAmelCase):
    """Bark TTS processor: wraps a text tokenizer plus optional speaker-embedding
    voice presets (semantic / coarse / fine prompt arrays).

    NOTE(review): identifiers look mechanically renamed (``UpperCAmelCase``
    parameters, some duplicated — a SyntaxError — and ``_lowercase`` locals);
    method bodies read the original names (``speaker_embeddings_dict_path``,
    ``voice_preset`` …). Comments describe the apparent intent; confirm against
    the original implementation before running.
    """

    SCREAMING_SNAKE_CASE__ = '''AutoTokenizer'''
    SCREAMING_SNAKE_CASE__ = ['''tokenizer''']
    # Expected ndim of each voice-preset array, keyed by prompt name.
    SCREAMING_SNAKE_CASE__ = {
        '''semantic_prompt''': 1,
        '''coarse_prompt''': 2,
        '''fine_prompt''': 2,
    }

    def __init__(self , UpperCAmelCase , UpperCAmelCase=None ) -> Optional[Any]:
        # Arguments: (tokenizer, optional speaker-embeddings dict).
        super().__init__(UpperCAmelCase )
        _lowercase =speaker_embeddings

    @classmethod
    def __A (cls , UpperCAmelCase , UpperCAmelCase="speaker_embeddings_path.json" , **UpperCAmelCase ) -> Union[str, Any]:
        # from_pretrained: load the tokenizer and, when present, the JSON index
        # of speaker embeddings from a hub repo or local directory.
        if speaker_embeddings_dict_path is not None:
            _lowercase =get_file_from_repo(
                UpperCAmelCase , UpperCAmelCase , subfolder=kwargs.pop('''subfolder''' , UpperCAmelCase ) , cache_dir=kwargs.pop('''cache_dir''' , UpperCAmelCase ) , force_download=kwargs.pop('''force_download''' , UpperCAmelCase ) , proxies=kwargs.pop('''proxies''' , UpperCAmelCase ) , resume_download=kwargs.pop('''resume_download''' , UpperCAmelCase ) , local_files_only=kwargs.pop('''local_files_only''' , UpperCAmelCase ) , use_auth_token=kwargs.pop('''use_auth_token''' , UpperCAmelCase ) , revision=kwargs.pop('''revision''' , UpperCAmelCase ) , )
            if speaker_embeddings_path is None:
                # Missing index file is not fatal: warn and continue without presets.
                logger.warning(
                    f"`{os.path.join(UpperCAmelCase , UpperCAmelCase )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`." )
                _lowercase =None
            else:
                with open(UpperCAmelCase ) as speaker_embeddings_json:
                    _lowercase =json.load(UpperCAmelCase )
        else:
            _lowercase =None
        _lowercase =AutoTokenizer.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
        return cls(tokenizer=UpperCAmelCase , speaker_embeddings=UpperCAmelCase )

    def __A (self , UpperCAmelCase , UpperCAmelCase="speaker_embeddings_path.json" , UpperCAmelCase="speaker_embeddings" , UpperCAmelCase = False , **UpperCAmelCase , ) -> int:
        # save_pretrained: dump each voice preset as .npy files plus a JSON index,
        # then delegate the tokenizer save to the base class.
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(UpperCAmelCase , UpperCAmelCase , '''v2''' ) , exist_ok=UpperCAmelCase )
            _lowercase ={}
            _lowercase =save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    _lowercase =self._load_voice_preset(UpperCAmelCase )
                    _lowercase ={}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict['''repo_or_path'''] , UpperCAmelCase , f"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=UpperCAmelCase , )
                        _lowercase =os.path.join(UpperCAmelCase , f"{prompt_key}_{key}.npy" )
                    _lowercase =tmp_dict
            with open(os.path.join(UpperCAmelCase , UpperCAmelCase ) , '''w''' ) as fp:
                json.dump(UpperCAmelCase , UpperCAmelCase )
        super().save_pretrained(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )

    def __A (self , UpperCAmelCase = None , **UpperCAmelCase ) -> int:
        # _load_voice_preset: fetch the three prompt arrays for a named preset
        # from the repo/path recorded in the speaker-embeddings index.
        _lowercase =self.speaker_embeddings[voice_preset]
        _lowercase ={}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." )
            _lowercase =get_file_from_repo(
                self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , UpperCAmelCase ) , cache_dir=kwargs.pop('''cache_dir''' , UpperCAmelCase ) , force_download=kwargs.pop('''force_download''' , UpperCAmelCase ) , proxies=kwargs.pop('''proxies''' , UpperCAmelCase ) , resume_download=kwargs.pop('''resume_download''' , UpperCAmelCase ) , local_files_only=kwargs.pop('''local_files_only''' , UpperCAmelCase ) , use_auth_token=kwargs.pop('''use_auth_token''' , UpperCAmelCase ) , revision=kwargs.pop('''revision''' , UpperCAmelCase ) , )
            if path is None:
                raise ValueError(
                    f"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." )
            _lowercase =np.load(UpperCAmelCase )
        return voice_preset_dict

    def __A (self , UpperCAmelCase = None ) -> List[Any]:
        # _validate_voice_preset_dict: check each prompt array exists, is an
        # ndarray, and has the ndim recorded in the preset_shape table.
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key." )
            if not isinstance(voice_preset[key] , np.ndarray ):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
            if len(voice_preset[key].shape ) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )

    def __call__(self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="pt" , UpperCAmelCase=2_5_6 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=False , **UpperCAmelCase , ) -> List[Any]:
        # Tokenize ``text`` and optionally attach a validated voice preset
        # (named preset, .npz path, or in-memory dict) to the encoding.
        if voice_preset is not None and not isinstance(UpperCAmelCase , UpperCAmelCase ):
            if (
                isinstance(UpperCAmelCase , UpperCAmelCase )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                _lowercase =self._load_voice_preset(UpperCAmelCase )
            else:
                if isinstance(UpperCAmelCase , UpperCAmelCase ) and not voice_preset.endswith('''.npz''' ):
                    _lowercase =voice_preset + '''.npz'''
                _lowercase =np.load(UpperCAmelCase )
        if voice_preset is not None:
            self._validate_voice_preset_dict(UpperCAmelCase , **UpperCAmelCase )
            _lowercase =BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
        _lowercase =self.tokenizer(
            UpperCAmelCase , return_tensors=UpperCAmelCase , padding='''max_length''' , max_length=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , add_special_tokens=UpperCAmelCase , **UpperCAmelCase , )
        if voice_preset is not None:
            _lowercase =voice_preset
        return encoded_text
| 5 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
# TODO Update this
UpperCAmelCase__ = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class lowerCamelCase__ ( lowerCAmelCase):
    """Configuration class for ESM models (and the ESMFold folding variant).

    NOTE(review): mangled copy — the ``__init__`` signature repeats
    ``UpperCAmelCase`` (a SyntaxError) while the body assigns from the original
    keyword names (vocab_size, hidden_size, …), all bound to ``_lowercase``.
    Confirm against the original before running.
    """

    SCREAMING_SNAKE_CASE__ = '''esm'''

    def __init__(self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=7_6_8 , UpperCAmelCase=1_2 , UpperCAmelCase=1_2 , UpperCAmelCase=3_0_7_2 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=1_0_2_6 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-12 , UpperCAmelCase="absolute" , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase , ) -> Tuple:
        super().__init__(pad_token_id=UpperCAmelCase , mask_token_id=UpperCAmelCase , **UpperCAmelCase )
        _lowercase =vocab_size
        _lowercase =hidden_size
        _lowercase =num_hidden_layers
        _lowercase =num_attention_heads
        _lowercase =intermediate_size
        _lowercase =hidden_dropout_prob
        _lowercase =attention_probs_dropout_prob
        _lowercase =max_position_embeddings
        _lowercase =initializer_range
        _lowercase =layer_norm_eps
        _lowercase =position_embedding_type
        _lowercase =use_cache
        _lowercase =emb_layer_norm_before
        _lowercase =token_dropout
        _lowercase =is_folding_model
        # Folding models carry an extra ESMFold sub-config and an explicit vocab list.
        if is_folding_model:
            if esmfold_config is None:
                logger.info('''No esmfold_config supplied for folding model, using default values.''' )
                _lowercase =EsmFoldConfig()
            elif isinstance(UpperCAmelCase , UpperCAmelCase ):
                _lowercase =EsmFoldConfig(**UpperCAmelCase )
            _lowercase =esmfold_config
            if vocab_list is None:
                logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
                _lowercase =get_default_vocab_list()
            else:
                _lowercase =vocab_list
        else:
            _lowercase =None
            _lowercase =None
        # use_esm_attn_map is accepted upstream but unsupported in this port.
        if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCAmelCase ):
            raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )

    def __A (self ) -> List[str]:
        # to_dict: serialize, expanding the nested ESMFold sub-config when present.
        _lowercase =super().to_dict()
        if isinstance(self.esmfold_config , UpperCAmelCase ):
            _lowercase =self.esmfold_config.to_dict()
        return output
@dataclass
class lowerCamelCase__ :
    """ESMFold top-level sub-config holding a nested trunk configuration.

    NOTE(review): every assignment below rebinds the same (mangled) class
    attribute name without an annotation, so the dataclass has no real fields
    as written — only the final value survives. Confirm the original field
    names (esm_type, fp16_esm, …, trunk) before use.
    """

    SCREAMING_SNAKE_CASE__ = None
    SCREAMING_SNAKE_CASE__ = True
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = 0
    SCREAMING_SNAKE_CASE__ = True
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = 128
    SCREAMING_SNAKE_CASE__ = None

    def __A (self ) -> Union[str, Any]:
        # Normalize ``trunk``: build a default TrunkConfig, or promote a dict.
        if self.trunk is None:
            _lowercase =TrunkConfig()
        elif isinstance(self.trunk , UpperCAmelCase ):
            _lowercase =TrunkConfig(**self.trunk )

    def __A (self ) -> Tuple:
        # to_dict: serialize, expanding the nested trunk sub-config.
        _lowercase =asdict(self )
        _lowercase =self.trunk.to_dict()
        return output
@dataclass
class lowerCamelCase__ :
    """ESMFold trunk hyper-parameters with dimension validation.

    NOTE(review): every assignment below rebinds the same (mangled, unannotated)
    class attribute, so only the final value survives and the checks in ``__A``
    cannot run as written — confirm the original field names before use.
    """

    SCREAMING_SNAKE_CASE__ = 48
    SCREAMING_SNAKE_CASE__ = 1024
    SCREAMING_SNAKE_CASE__ = 128
    SCREAMING_SNAKE_CASE__ = 32
    SCREAMING_SNAKE_CASE__ = 32
    SCREAMING_SNAKE_CASE__ = 32
    SCREAMING_SNAKE_CASE__ = 0
    SCREAMING_SNAKE_CASE__ = 0
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = 4
    SCREAMING_SNAKE_CASE__ = 128
    SCREAMING_SNAKE_CASE__ = None

    def __A (self ) -> List[str]:
        """Normalize the nested structure-module config and validate dimensions."""
        if self.structure_module is None:
            _lowercase =StructureModuleConfig()
        elif isinstance(self.structure_module , UpperCAmelCase ):
            _lowercase =StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}." )
        # Fixed: both divisibility checks below compared a dimension against
        # itself (always divisible), which silently disabled the validation.
        # They now check each state dim against its corresponding head width.
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                f" {self.sequence_state_dim} and {self.sequence_head_width}." )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
        _lowercase =self.sequence_state_dim // self.sequence_head_width
        _lowercase =self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}." )

    def __A (self ) -> Dict:
        """Serialize to a plain dict, expanding the structure-module sub-config."""
        _lowercase =asdict(self )
        _lowercase =self.structure_module.to_dict()
        return output
@dataclass
class lowerCamelCase__ :
    """ESMFold structure-module (invariant point attention) hyper-parameters.

    NOTE(review): every assignment below rebinds the same (mangled, unannotated)
    class attribute, so only the final value (1E5) survives and the dataclass
    has no real fields as written — confirm the original field names before use.
    """

    SCREAMING_SNAKE_CASE__ = 384
    SCREAMING_SNAKE_CASE__ = 128
    SCREAMING_SNAKE_CASE__ = 16
    SCREAMING_SNAKE_CASE__ = 128
    SCREAMING_SNAKE_CASE__ = 12
    SCREAMING_SNAKE_CASE__ = 4
    SCREAMING_SNAKE_CASE__ = 8
    SCREAMING_SNAKE_CASE__ = 0.1
    SCREAMING_SNAKE_CASE__ = 8
    SCREAMING_SNAKE_CASE__ = 1
    SCREAMING_SNAKE_CASE__ = 2
    SCREAMING_SNAKE_CASE__ = 7
    SCREAMING_SNAKE_CASE__ = 10
    SCREAMING_SNAKE_CASE__ = 1E-8
    SCREAMING_SNAKE_CASE__ = 1E5

    def __A (self ) -> List[Any]:
        # to_dict: return the dataclass serialized as a plain dict.
        return asdict(self )
def UpperCAmelCase_ ( ) -> Tuple:
    """Return the default ESM-2 vocabulary: 33 token strings — four leading
    special tokens, the 25 residue symbols, two punctuation tokens, and two
    trailing special tokens, in the canonical order."""
    specials = ("<cls>", "<pad>", "<eos>", "<unk>")
    residues = tuple("LAGVSERTIDPKQNFYMHWCXBUZO")
    trailing = (".", "-", "<null_1>", "<mask>")
    return specials + residues + trailing
| 5 | 1 |
def UpperCAmelCase_ ( ) -> Any:
    """Return the last ten digits of 1**1 + 2**2 + ... + 1000**1000 as a string.

    Project Euler problem 48. Fixed: the original initialised ``_lowercase``
    but accumulated into an undefined ``total`` and returned an undefined
    ``__snake_case``, so every call raised NameError.
    """
    total = 0
    for i in range(1 , 1001 ):
        total += i**i
    return str(total )[-10:]
if __name__ == "__main__":
    # Fixed: the guard called an undefined ``solution``; invoke the function above.
    print(UpperCAmelCase_())
| 5 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
# Stop sequences marking the end of a generated function body during HumanEval
# sampling; generation halts once any of these appears in the decoded text.
UpperCAmelCase__ = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
class lowerCamelCase__ ( lowerCAmelCase):
    """Iterable dataset that tokenizes each HumanEval prompt and yields each
    task ``n_copies`` times (so every sample is generated multiple times).

    NOTE(review): mangled — ``__init__`` repeats the parameter name
    ``UpperCAmelCase`` (a SyntaxError) while the body assigns the original
    names (tokenizer, dataset, n_tasks, n_copies) to ``_lowercase``.
    """

    def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=1 ) -> Dict:
        _lowercase =tokenizer
        _lowercase =dataset
        _lowercase =len(UpperCAmelCase ) if n_tasks is None else n_tasks
        _lowercase =n_copies

    def __iter__(self ) -> Optional[Any]:
        # Tokenize all prompts in one padded batch, then yield per-task copies.
        _lowercase =[]
        for task in range(self.n_tasks ):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
        _lowercase =self.tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors='''pt''' )
        for task in range(self.n_tasks ):
            for _ in range(self.n_copies ):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class lowerCamelCase__ ( lowerCAmelCase):
    """Stopping criterion: generation stops once every sequence in the batch
    contains one of the EOF stop strings after ``start_length`` tokens.

    NOTE(review): mangled — both signatures repeat ``UpperCAmelCase`` (a
    SyntaxError) while the bodies use the original names (start_length,
    eof_strings, tokenizer, input_ids).
    """

    def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
        _lowercase =start_length
        _lowercase =eof_strings
        _lowercase =tokenizer

    def __call__(self , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Dict:
        # Decode only the newly generated suffix, then require a stop string
        # in every decoded continuation before halting.
        _lowercase =self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        _lowercase =[]
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(UpperCAmelCase )
def UpperCAmelCase_ ( __snake_case , stop_strings=None ) -> Optional[Any]:
    """Truncate generated code at the last stop sequence.

    Splits the text on a regex alternation of the stop sequences and drops the
    final stop marker and everything after it; returns "" when no stop
    sequence is present.

    Fixed: the original built the regex from the characters of the *input
    string* (``"|".join`` over the parameter) instead of the EOF stop-string
    list, and returned an undefined name ``string_list``.

    stop_strings: optional override of the stop sequences; defaults to the
        module-level EOF markers in ``UpperCAmelCase__`` (backward compatible
        — new parameter with a default).
    """
    if stop_strings is None:
        stop_strings = UpperCAmelCase__
    string_list = re.split('''(%s)''' % '''|'''.join(stop_strings ) , __snake_case )
    # last string should be ""
    return "".join(string_list[:-2] )
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=20 , **__snake_case ) -> Tuple:
    """Generate ``batch_size`` completions per HumanEval task across processes
    and return, per task, the decoded code truncated at the first stop marker.

    NOTE(review): mangled — the signature repeats ``__snake_case`` (a
    SyntaxError) while the body reads the original names (accelerator, model,
    tokenizer, dataloader, n_tasks, batch_size) and binds locals to
    ``_lowercase``. Comments describe the apparent intent.
    """
    _lowercase =defaultdict(__snake_case )  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(__snake_case ) ):
        with torch.no_grad():
            _lowercase =batch['''ids'''].shape[-1]
            _lowercase =accelerator.unwrap_model(__snake_case ).generate(
                input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=__snake_case , **__snake_case )
            # each task is generated batch_size times
            _lowercase =batch['''task_id'''].repeat(__snake_case )
            _lowercase =accelerator.pad_across_processes(
                __snake_case , dim=1 , pad_index=tokenizer.pad_token_id )
            _lowercase , _lowercase =accelerator.gather((generated_tokens, generated_tasks) )
            _lowercase =generated_tokens.cpu().numpy()
            _lowercase =generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(__snake_case , __snake_case ):
                gen_token_dict[task].append(__snake_case )
    # Decode every stored generation and cut it at the first EOF stop marker.
    _lowercase =[[] for _ in range(__snake_case )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            _lowercase =tokenizer.decode(__snake_case , skip_special_tokens=__snake_case , clean_up_tokenization_spaces=__snake_case )
            code_gens[task].append(remove_last_block(__snake_case ) )
    return code_gens
def UpperCAmelCase_ ( ) -> str:
    """Entry point: sample code completions for the OpenAI HumanEval benchmark
    with an accelerate-distributed causal LM and score them with the
    ``code_eval`` metric (pass@k), writing the results to a JSON file.

    NOTE(review): mangled copy — locals are all bound to ``_lowercase`` while
    later statements read the original names (args, accelerator, tokenizer,
    model, human_eval, …); run only after reconciling with the original.
    """
    _lowercase =HfArgumentParser(__snake_case )
    _lowercase =parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    _lowercase =args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    _lowercase ='''false'''
    if args.num_workers is None:
        _lowercase =multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    _lowercase =Accelerator()
    set_seed(args.seed , device_specific=__snake_case )
    # Load model and tokenizer
    _lowercase =AutoTokenizer.from_pretrained(args.model_ckpt )
    _lowercase =tokenizer.eos_token
    _lowercase =AutoModelForCausalLM.from_pretrained(args.model_ckpt )
    # Generation settings
    _lowercase ={
        '''do_sample''': args.do_sample,
        '''temperature''': args.temperature,
        '''max_new_tokens''': args.max_new_tokens,
        '''top_p''': args.top_p,
        '''top_k''': args.top_k,
        '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , __snake_case , __snake_case )] ),
    }
    # Load evaluation dataset and metric
    _lowercase =load_dataset('''openai_humaneval''' )
    _lowercase =load_metric('''code_eval''' )
    _lowercase =args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
    _lowercase =args.n_samples // args.batch_size
    _lowercase =TokenizedDataset(__snake_case , human_eval['''test'''] , n_copies=__snake_case , n_tasks=__snake_case )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    _lowercase =DataLoader(__snake_case , batch_size=1 )
    # Run a quick test to see if code evaluation is enabled
    try:
        _lowercase =code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
    except ValueError as exception:
        print(
            '''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
            ''' flag to enable code evaluation.''' )
        raise exception
    _lowercase , _lowercase =accelerator.prepare(__snake_case , __snake_case )
    _lowercase =complete_code(
        __snake_case , __snake_case , __snake_case , __snake_case , n_tasks=__snake_case , batch_size=args.batch_size , **__snake_case , )
    if accelerator.is_main_process:
        # Build the reference test harness for each task: its unit tests plus
        # an explicit check(...) call on the entry point.
        _lowercase =[]
        for task in tqdm(range(__snake_case ) ):
            _lowercase =human_eval['''test'''][task]['''test''']
            _lowercase =F"check({human_eval['test'][task]['entry_point']})"
            references.append('''\n''' + test_func + '''\n''' + entry_point )
        # Evaluate completions with "code_eval" metric
        _lowercase , _lowercase =code_eval_metric.compute(
            references=__snake_case , predictions=__snake_case , num_workers=args.num_workers )
        print(F"Results: {pass_at_k}" )
        # Save results to json file
        with open(args.output_file , '''w''' ) as fp:
            json.dump(__snake_case , __snake_case )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
| 5 | 1 |
# Character -> Morse code lookup table (ITU-R M.1677-1, plus '!').
UpperCAmelCase__ = {
    '''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
    '''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
    '''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
    '''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
    '''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
    '''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
    ''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
    '''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
    '''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
# NOTE(review): the comprehension below reads ``MORSE_CODE_DICT``, but the
# table above was bound to ``UpperCAmelCase__`` — this raises NameError at
# import. Presumably both were originally one name; confirm.
UpperCAmelCase__ = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCAmelCase_ ( __snake_case ) -> str:
    """Encode a plain-text string as Morse code, one space-separated code per character.

    Fixed: the body referenced an undefined name ``message`` instead of the
    function's own parameter. (``MORSE_CODE_DICT`` remains unresolved at module
    level in this mangled copy — see the note on the table above.)
    """
    return " ".join(MORSE_CODE_DICT[char] for char in __snake_case.upper() )
def UpperCAmelCase_ ( __snake_case ) -> str:
    """Decode a space-separated Morse string back into plain text.

    Fixed: the body referenced an undefined name ``message`` instead of the
    function's own parameter. (``REVERSE_DICT`` remains unresolved at module
    level in this mangled copy.)
    """
    return "".join(REVERSE_DICT[char] for char in __snake_case.split() )
def UpperCAmelCase_ ( ) -> None:
    """Demo: print a message, its Morse encoding, and the round-tripped decoding.

    Fixed: every local was bound to ``_lowercase`` while later statements read
    an undefined ``__snake_case``. NOTE(review): ``encrypt``/``decrypt`` (and
    ``main`` in the guard) still do not exist under those names in this module;
    kept as-is to preserve the original call targets — reconcile before running
    as a script.
    """
    message = '''Morse code here!'''
    print(message )
    encoded = encrypt(message )
    print(encoded )
    decoded = decrypt(encoded )
    print(decoded )
if __name__ == "__main__":
    main()
| 5 |
UpperCAmelCase__ = 8.31_44_62  # Universal (molar) gas constant R — Unit - J mol-1 K-1
def UpperCAmelCase_ ( moles , kelvin , volume ) -> float:
    """Return the pressure (Pa) of an ideal gas: P = nRT / V.

    moles: amount of substance n (mol); kelvin: temperature T (K);
    volume: volume V (m^3). Raises ValueError for negative inputs.

    Fixed: the original declared three parameters with the same name (a
    SyntaxError) and referenced ``UNIVERSAL_GAS_CONSTANT``, which is not
    defined in this module (the constant above was bound to
    ``UpperCAmelCase__``); the value is inlined here.
    """
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError('''Invalid inputs. Enter positive value.''' )
    _r = 8.31_44_62  # universal gas constant R in J mol^-1 K^-1
    return moles * kelvin * _r / volume
def UpperCAmelCase_ ( moles , kelvin , pressure ) -> float:
    """Return the volume (m^3) of an ideal gas: V = nRT / P.

    moles: amount of substance n (mol); kelvin: temperature T (K);
    pressure: pressure P (Pa). Raises ValueError for negative inputs.

    Fixed: the original declared three parameters with the same name (a
    SyntaxError) and referenced ``UNIVERSAL_GAS_CONSTANT``, which is not
    defined in this module; the value is inlined here.
    """
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError('''Invalid inputs. Enter positive value.''' )
    _r = 8.31_44_62  # universal gas constant R in J mol^-1 K^-1
    return moles * kelvin * _r / pressure
if __name__ == "__main__":
    # Run the module's doctests when executed as a script (none are currently
    # present in the docstrings above).
    from doctest import testmod
    testmod()
| 5 | 1 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
# Map of pretrained Data2VecAudio checkpoint names to their hosted config URLs.
UpperCAmelCase__ = {
    '''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowerCamelCase__ ( lowerCAmelCase):
    """Configuration class for Data2Vec audio models.

    BUG FIX / NOTE(review): the original ``__init__`` declared every parameter
    as ``UpperCAmelCase`` — duplicate argument names are a SyntaxError — while
    the body referenced the real names.  The parameter names below are restored
    from the published ``Data2VecAudioConfig`` signature; all defaults are kept
    verbatim from this file.
    """

    SCREAMING_SNAKE_CASE__ = '''data2vec-audio'''  # model_type identifier

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        # Transformer encoder dimensions and feature-extractor settings.
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # The three convolutional spec lists must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        """Total stride of the feature extractor, i.e. input samples per logit frame."""
        return math.prod(self.conv_stride)
| 5 |
from __future__ import annotations
from collections.abc import Callable
UpperCAmelCase__ = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve ``matrix @ x = vector`` by Gaussian elimination with partial
    pivoting and back substitution, returning x as a column matrix.

    BUG FIX: the original declared both parameters as ``__snake_case``
    (duplicate argument names are a SyntaxError); the intended identifiers
    are restored throughout this block.
    """
    size: int = len(matrix)
    # Build the augmented matrix [matrix | vector].
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # Partial pivoting: pick the row with the largest |entry| in this column.
        pivot_row = max(
            (abs(augmented[rowa][col]), rowa) for rowa in range(row, size)
        )[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        # Eliminate this column from all rows below the pivot.
        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1

    # Back substitution.
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # Round to get rid of numbers like 2.000000000000004.
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_points: list[int]) -> Callable[[int], int]:
    """Return the polynomial function interpolating the points
    ``(1, y_points[0]), (2, y_points[1]), ...`` exactly.
    """
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        # Evaluate the fitted polynomial at *var* with rounded integer coefficients.
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating polynomial u(n) from Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms (FITs) of the optimum polynomials
    OP(k, n) for k = 1..order (Project Euler problem 101).

    BUG FIX: the original functions were all renamed to ``UpperCAmelCase_``
    while still calling ``solve``/``interpolate``/``question_function``, so
    the module raised NameError; the intended names are restored.

    >>> solution(lambda n: n ** 3, 3)
    74
    """
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret = 0
    for poly in polynomials:
        # The FIT is the first value where the fitted polynomial diverges
        # from the true generating function.
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
| 5 | 1 |
from collections import deque
def tarjan(g: list[list[int]]) -> list[list[int]]:
    """Return the strongly connected components of the directed graph *g*
    (adjacency lists), in the order Tarjan's algorithm emits them
    (reverse topological order of the condensation).

    BUG FIX: the nested helper originally declared three parameters all named
    ``__snake_case`` (duplicate argument names are a SyntaxError) while its
    body used ``v``/``index``/``components``; the intended names are restored.
    """
    n = len(g)
    stack: deque[int] = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v: int, index: int, components: list[list[int]]) -> int:
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            # v is the root of an SCC: pop the whole component off the stack.
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components: list[list[int]] = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components
def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    """Build adjacency lists for *n* vertices from directed ``(u, v)`` edges.

    BUG FIX: the original declared both parameters as ``__snake_case``
    (duplicate argument names are a SyntaxError) while the body used
    ``n``/``edges``/``g``; the intended names are restored.
    """
    g: list[list[int]] = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
    # Test
    # Smoke test: the SCCs of this 7-node graph, in the order Tarjan's
    # algorithm emits them.
    UpperCAmelCase__ = 7
    UpperCAmelCase__ = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    UpperCAmelCase__ = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    UpperCAmelCase__ = [(u, v) for u, v in zip(source, target)]  # NOTE(review): ``source``/``target`` are undefined — the assignments above all reuse the name ``UpperCAmelCase__`` (automated rename gone wrong); confirm against the original.
    UpperCAmelCase__ = create_graph(n_vertices, edges)  # NOTE(review): ``n_vertices``/``edges``/``create_graph`` are likewise undefined here
    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)  # NOTE(review): ``tarjan``/``g`` undefined for the same reason
| 5 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import scaffolding for the XLM model family: the mapping declares
# which submodules export which symbols; heavy torch/TF modules are only
# imported on first attribute access (or eagerly for static type checkers).
UpperCAmelCase__ = {
    '''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
    '''tokenization_xlm''': ['''XLMTokenizer'''],
}
# Register the PyTorch model symbols only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch missing: the modeling symbols are simply not exported
else:
    UpperCAmelCase__ = [
        '''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XLMForMultipleChoice''',
        '''XLMForQuestionAnswering''',
        '''XLMForQuestionAnsweringSimple''',
        '''XLMForSequenceClassification''',
        '''XLMForTokenClassification''',
        '''XLMModel''',
        '''XLMPreTrainedModel''',
        '''XLMWithLMHeadModel''',
    ]
# Register the TensorFlow model symbols only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # TensorFlow missing: the TF symbols are simply not exported
else:
    UpperCAmelCase__ = [
        '''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFXLMForMultipleChoice''',
        '''TFXLMForQuestionAnsweringSimple''',
        '''TFXLMForSequenceClassification''',
        '''TFXLMForTokenClassification''',
        '''TFXLMMainLayer''',
        '''TFXLMModel''',
        '''TFXLMPreTrainedModel''',
        '''TFXLMWithLMHeadModel''',
    ]
if TYPE_CHECKING:
    # Eager imports so static analyzers see the real symbols.
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    # NOTE(review): ``_import_structure`` is undefined in this file — the
    # mapping above was renamed to ``UpperCAmelCase__`` by an automated tool;
    # confirm against the original module before running.
    UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 | 1 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
UpperCAmelCase__ = 25_0004
UpperCAmelCase__ = 25_0020
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( lowerCAmelCase , unittest.TestCase):
SCREAMING_SNAKE_CASE__ = MBartaaTokenizer
SCREAMING_SNAKE_CASE__ = MBartaaTokenizerFast
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
def __A (self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
_lowercase =MBartaaTokenizer(UpperCAmelCase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __A (self ) -> str:
_lowercase ='''<s>'''
_lowercase =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase )
def __A (self ) -> int:
_lowercase =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(UpperCAmelCase ) , 1_0_5_4 )
def __A (self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_5_4 )
def __A (self ) -> Any:
_lowercase =MBartaaTokenizer(UpperCAmelCase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=UpperCAmelCase )
_lowercase =tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_lowercase =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCAmelCase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
_lowercase =tokenizer.convert_tokens_to_ids(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
_lowercase =tokenizer.convert_ids_to_tokens(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def __A (self ) -> int:
# fmt: off
_lowercase ={'''input_ids''': [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
def __A (self ) -> Dict:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_lowercase =(self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowercase =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
_lowercase =self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
_lowercase =tempfile.mkdtemp()
_lowercase =tokenizer_r.save_pretrained(UpperCAmelCase )
_lowercase =tokenizer_p.save_pretrained(UpperCAmelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
_lowercase =tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(UpperCAmelCase , UpperCAmelCase )
# Checks everything loads correctly in the same way
_lowercase =tokenizer_r.from_pretrained(UpperCAmelCase )
_lowercase =tokenizer_p.from_pretrained(UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase , UpperCAmelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCAmelCase )
# Save tokenizer rust, legacy_format=True
_lowercase =tempfile.mkdtemp()
_lowercase =tokenizer_r.save_pretrained(UpperCAmelCase , legacy_format=UpperCAmelCase )
_lowercase =tokenizer_p.save_pretrained(UpperCAmelCase )
# Checks it save with the same files
self.assertSequenceEqual(UpperCAmelCase , UpperCAmelCase )
# Checks everything loads correctly in the same way
_lowercase =tokenizer_r.from_pretrained(UpperCAmelCase )
_lowercase =tokenizer_p.from_pretrained(UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase , UpperCAmelCase ) )
shutil.rmtree(UpperCAmelCase )
# Save tokenizer rust, legacy_format=False
_lowercase =tempfile.mkdtemp()
_lowercase =tokenizer_r.save_pretrained(UpperCAmelCase , legacy_format=UpperCAmelCase )
_lowercase =tokenizer_p.save_pretrained(UpperCAmelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_lowercase =tokenizer_r.from_pretrained(UpperCAmelCase )
_lowercase =tokenizer_p.from_pretrained(UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase , UpperCAmelCase ) )
shutil.rmtree(UpperCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase):
SCREAMING_SNAKE_CASE__ = '''facebook/mbart-large-50-one-to-many-mmt'''
SCREAMING_SNAKE_CASE__ = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
SCREAMING_SNAKE_CASE__ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
SCREAMING_SNAKE_CASE__ = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
@classmethod
def __A (cls ) -> List[Any]:
_lowercase =MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
_lowercase =1
return cls
def __A (self ) -> Union[str, Any]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 2_5_0_0_2_0 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 2_5_0_0_3_8 )
def __A (self ) -> Tuple:
_lowercase =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase )
def __A (self ) -> Optional[int]:
self.assertIn(UpperCAmelCase , self.tokenizer.all_special_ids )
_lowercase =[RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
_lowercase =self.tokenizer.decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
_lowercase =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase )
def __A (self ) -> Any:
_lowercase =['''this is gunna be a long sentence ''' * 2_0]
assert isinstance(src_text[0] , UpperCAmelCase )
_lowercase =1_0
_lowercase =self.tokenizer(UpperCAmelCase , max_length=UpperCAmelCase , truncation=UpperCAmelCase ).input_ids[0]
self.assertEqual(ids[0] , UpperCAmelCase )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
def __A (self ) -> str:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [2_5_0_0_5_3, 2_5_0_0_0_1] )
def __A (self ) -> Dict:
_lowercase =tempfile.mkdtemp()
_lowercase =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCAmelCase )
_lowercase =MBartaaTokenizer.from_pretrained(UpperCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCAmelCase )
@require_torch
def __A (self ) -> int:
_lowercase =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase , return_tensors='''pt''' )
_lowercase =shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def __A (self ) -> List[Any]:
_lowercase =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
_lowercase =shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
_lowercase =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __A (self ) -> List[Any]:
_lowercase =self.tokenizer(self.src_text , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=3 , return_tensors='''pt''' )
_lowercase =self.tokenizer(
text_target=self.tgt_text , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=1_0 , return_tensors='''pt''' )
_lowercase =targets['''input_ids''']
_lowercase =shift_tokens_right(UpperCAmelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def __A (self ) -> Optional[Any]:
_lowercase =self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
# en_XX, A, test, EOS
'''input_ids''': [[2_5_0_0_0_4, 6_2, 3_0_3_4, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 2_5_0_0_0_1,
} , )
| 5 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import scaffolding for EfficientNet: the mapping declares which
# submodules export which symbols; vision/torch modules are only imported
# on first attribute access (or eagerly for static type checkers).
UpperCAmelCase__ = {
    '''configuration_efficientnet''': [
        '''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''EfficientNetConfig''',
        '''EfficientNetOnnxConfig''',
    ]
}
# Register the image processor only when vision dependencies are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # vision extras missing: the image processor is simply not exported
else:
    UpperCAmelCase__ = ['''EfficientNetImageProcessor''']
# Register the PyTorch model symbols only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch missing: the modeling symbols are simply not exported
else:
    UpperCAmelCase__ = [
        '''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''EfficientNetForImageClassification''',
        '''EfficientNetModel''',
        '''EfficientNetPreTrainedModel''',
    ]
if TYPE_CHECKING:
    # Eager imports so static analyzers see the real symbols.
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    # NOTE(review): ``_import_structure`` is undefined in this file — the
    # mapping above was renamed to ``UpperCAmelCase__`` by an automated tool;
    # confirm against the original module before running.
    UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 5 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class lowerCamelCase__ ( lowerCAmelCase):
    """Configuration for the MGP-STR scene-text-recognition model.

    BUG FIX / NOTE(review): the original ``__init__`` declared every parameter
    as ``UpperCAmelCase`` — duplicate argument names are a SyntaxError — while
    the body used the real attribute names.  The parameter names below are
    restored from the published ``MgpstrConfig`` signature; defaults are kept
    verbatim from this file.  ``output_aa_attentions`` follows this file's
    attribute spelling — confirm against the upstream name before merging.
    """

    SCREAMING_SNAKE_CASE__ = '''mgp-str'''  # model_type identifier

    def __init__(
        self,
        image_size=[32, 128],  # kept as a list for upstream compatibility; never mutated
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_aa_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Vision-transformer geometry.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        # Output-head vocabulary sizes (character / BPE / wordpiece streams).
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        # Encoder dimensions.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        # Regularisation.
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_aa_attentions = output_aa_attentions
        self.initializer_range = initializer_range
| 5 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for TimeSformer: the mapping declares which
# submodules export which symbols; the torch modeling module is only
# imported on first attribute access (or eagerly for static type checkers).
UpperCAmelCase__ = {
    '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
# Register the PyTorch model symbols only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch missing: the modeling symbols are simply not exported
else:
    UpperCAmelCase__ = [
        '''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TimesformerModel''',
        '''TimesformerForVideoClassification''',
        '''TimesformerPreTrainedModel''',
    ]
if TYPE_CHECKING:
    # Eager imports so static analyzers see the real symbols.
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    # NOTE(review): ``_import_structure`` is undefined in this file — the
    # mapping above was renamed to ``UpperCAmelCase__`` by an automated tool;
    # confirm against the original module before running.
    UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( lowerCAmelCase , unittest.TestCase):
SCREAMING_SNAKE_CASE__ = DanceDiffusionPipeline
SCREAMING_SNAKE_CASE__ = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
SCREAMING_SNAKE_CASE__ = PipelineTesterMixin.required_optional_params - {
'''callback''',
'''latents''',
'''callback_steps''',
'''output_type''',
'''num_images_per_prompt''',
}
SCREAMING_SNAKE_CASE__ = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
def __A (self ) -> Dict:
torch.manual_seed(0 )
_lowercase =UNetaDModel(
block_out_channels=(3_2, 3_2, 6_4) , extra_in_channels=1_6 , sample_size=5_1_2 , sample_rate=1_6_0_0_0 , in_channels=2 , out_channels=2 , flip_sin_to_cos=UpperCAmelCase , use_timestep_embedding=UpperCAmelCase , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , )
_lowercase =IPNDMScheduler()
_lowercase ={
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def __A (self , UpperCAmelCase , UpperCAmelCase=0 ) -> Tuple:
if str(UpperCAmelCase ).startswith('''mps''' ):
_lowercase =torch.manual_seed(UpperCAmelCase )
else:
_lowercase =torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
_lowercase ={
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 4,
}
return inputs
def __A (self ) -> Tuple:
_lowercase ='''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowercase =self.get_dummy_components()
_lowercase =DanceDiffusionPipeline(**UpperCAmelCase )
_lowercase =pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
_lowercase =self.get_dummy_inputs(UpperCAmelCase )
_lowercase =pipe(**UpperCAmelCase )
_lowercase =output.audios
_lowercase =audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_lowercase =np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def __A (self ) -> Any:
return super().test_save_load_local()
@skip_mps
def __A (self ) -> Dict:
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def __A (self ) -> Any:
return super().test_save_load_optional_components()
@skip_mps
def __A (self ) -> Optional[Any]:
return super().test_attention_slicing_forward_pass()
def __A (self ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase):
    """Slow GPU integration tests against the pretrained harmonai/maestro-150k pipeline."""

    def tearDown(self):
        # clean up the VRAM after each test
        # (the original method was renamed to __A, so unittest never called it)
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device
        # torch.floataa was a mangled spelling of torch.float16 (half precision)
        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
| 5 |
def actual_power(a: int, b: int) -> int:
    """Divide-and-conquer integer exponentiation: return a**|b| for integer a, b.

    Restored: both parameters were declared with the same name (a SyntaxError)
    while the body used ``a``/``b``, and the recursive calls referenced
    ``actual_power``, which was never defined. Also computes the half-power
    once instead of twice, turning an O(b) call tree into O(log b).
    """
    if b == 0:
        return 1
    # int(b / 2) truncates toward zero, so negative exponents also terminate
    # (Python guarantees (-odd) % 2 == 1, taking the odd branch below).
    half = actual_power(a, int(b / 2))
    if (b % 2) == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    """Return a raised to the power b, supporting negative exponents.

    >>> power(2, 3)
    8
    >>> power(2, -3)
    0.125
    """
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
| 5 | 1 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class lowerCamelCase__ :
    """
    Base interface for generation streamers: ``put`` receives new token ids as
    they are produced and ``end`` signals that generation has finished.

    Restored: both abstract methods had been renamed to ``__A`` (the second
    shadowing the first); the subclasses below override ``put``/``end``.
    """

    def put(self, value):
        """Push new token ids to the streamer; must be overridden."""
        raise NotImplementedError()

    def end(self):
        """Signal the end of generation; must be overridden."""
        raise NotImplementedError()
class lowerCamelCase__ ( lowerCAmelCase):
    """
    Streamer that prints decoded text to stdout as soon as complete words are formed.

    Args:
        tokenizer: tokenizer used to decode the incoming token ids.
        skip_prompt: if True, the first ``put`` call (the prompt tokens) is not printed.
        decode_kwargs: extra keyword arguments forwarded to ``tokenizer.decode``.

    Restored: all methods had been renamed to ``__A`` while the internal call
    sites (``self.on_finalized_text``, ``self._is_chinese_char``) kept the
    original names, and several parameters no longer matched their bodies.
    """

    def __init__(self, tokenizer, skip_prompt=False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []  # token ids not yet flushed as text
        self.print_len = 0  # number of characters already emitted from the decoded cache
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Decode the cached tokens plus ``value`` and emit any text that is safe to print."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError('TextStreamer only supports batch size 1')
        elif len(value.shape) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
        # After the symbol for a new line, we flush the cache.
        if text.endswith('\n'):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(' ') + 1]
            self.print_len += len(printable_text)
        self.on_finalized_text(printable_text)

    def end(self):
        """Flush any remaining cached text and reset the streamer state."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ''
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text, stream_end=False):
        """Print finalized text to stdout; subclasses may override to redirect the stream."""
        print(text, flush=True, end='' if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Return True if code point ``cp`` lies in a CJK Unicode block."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True
        return False
class lowerCamelCase__ ( lowerCAmelCase):
    """
    Streamer that places print-ready text on a queue so a consumer (usually a
    different thread) can iterate over it as generation proceeds.

    Restored: the iterator's ``__next__`` had been renamed to ``__A``, which
    broke iteration entirely, and ``__init__``/``on_finalized_text`` had
    parameters that no longer matched their bodies.
    """

    def __init__(self, tokenizer, skip_prompt=False, timeout=None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None  # sentinel pushed when the stream ends
        self.timeout = timeout

    def on_finalized_text(self, text, stream_end=False):
        """Put the new text on the queue; at stream end, also push the stop sentinel."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
| 5 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowerCamelCase__ ( nn.Module):
    """
    Wrapper combining two Transformer2D models so that two conditioning streams
    (e.g. image and text embeddings) can be encoded and mixed.

    Restored: the original ``__init__`` declared every parameter with the same
    name (a SyntaxError) — the names below are grounded by the keyword
    arguments of the inner transformer construction — and ``forward`` had been
    renamed to ``__A``, which breaks ``nn.Module.__call__``.
    """

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Encode each condition slice with its transformer, mix the residuals, and
        return the combined sample (tuple when ``return_dict`` is False)."""
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=output_states)
| 5 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''',
}
class lowerCamelCase__ ( lowerCAmelCase , lowerCAmelCase):
    """
    Configuration class storing the architecture hyper-parameters of a FocalNet model.

    Restored: the original ``__init__`` declared every parameter with the same
    name (a SyntaxError); the names below are grounded by the right-hand sides
    of the attribute assignments in the body. ``model_type`` is the class
    attribute PretrainedConfig requires.
    """

    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # Stage names are needed to align the requested backbone features.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 5 |
import heapq as hq
import math
from collections.abc import Iterator
class lowerCamelCase__ :
    """A graph vertex for Prim's minimum-spanning-tree algorithms.

    Restored: ``__init__``/``__lt__`` parameters no longer matched their
    bodies, and ``add_neighbor``/``add_edge`` had both been renamed to ``__A``
    (the second with duplicate parameter names — a SyntaxError) even though
    ``connect`` below calls them by their original names.
    """

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None  # best known edge weight connecting this vertex to the tree
        self.pi = None  # predecessor in the minimum spanning tree
        self.neighbors = []
        self.edges = {}  # {vertex.id: distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        """Register ``vertex`` as adjacent to this one."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        """Record the weight of the edge from this vertex to ``vertex``."""
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    """Connect vertices ``a`` and ``b`` (1-indexed positions in ``graph``) with weight ``edge``."""
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root) -> list:
    """Prim's algorithm, list-based (O(V^2)).

    Returns the MST as a list of ``(child, parent)`` pairs of 1-based vertex ids.
    """
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)  # vertex with the smallest key still outside the tree
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root) -> Iterator[tuple]:
    """Prim's algorithm, binary-heap based (O(E log V)).

    Yields the MST edges as ``(child, parent)`` pairs of 1-based vertex ids.
    """
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)  # restore heap order after the key decrease
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """Placeholder for doctest-based checks of ``prim`` and ``prim_heap``."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
# Project Euler 92: number chains formed by repeatedly summing squared digits
# always end at 1 or 89. Count the starting numbers below ten million that
# arrive at 89.
#
# Restored: the module constants were renamed to UpperCAmelCase__ even though
# the functions reference them as DIGITS_SQUARED / CHAINS, and the function
# parameters no longer matched their bodies.

# Precomputed digit-square sums for every 5-digit block (speeds up next_number).
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of ``number``.

    >>> next_number(123)
    14
    """
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10000000
CHAINS[0] = True  # number 1: chain ends at 1
CHAINS[57] = False  # number 58: chain ends at 89


def chain(number: int) -> bool:
    """Return True if ``number``'s chain ends at 1, False if it ends at 89.

    Results are memoised in CHAINS, including all power-of-ten multiples of
    ``number`` (their digit-square sums are identical).
    """
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain


def solution(number: int = 10000000) -> int:
    """Count how many starting numbers in [1, ``number``] arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
| 5 |
# flake8: noqa
# Lint as: python3
# Public re-export list for this utils package, mirroring the imports below.
# NOTE(review): the assignment target was mangled by an automated rename;
# given the contents it was presumably ``__all__`` — confirm against the
# package's history before relying on star-import behaviour.
UpperCAmelCase__ = [
    '''VerificationMode''',
    '''Version''',
    '''disable_progress_bar''',
    '''enable_progress_bar''',
    '''is_progress_bar_enabled''',
    '''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 5 | 1 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
# Type aliases for model inputs and (nested dict) model outputs.
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.

# Scale factor applied to ProteinNet tertiary coordinates below. The constant
# had been renamed to UpperCAmelCase__ even though the parsing code multiplies
# by PICO_TO_ANGSTROM, which made that reference a NameError.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation.

    Restored: every field annotation had been collapsed to
    ``SCREAMING_SNAKE_CASE__ = 42`` (so the dataclass had no fields at all),
    ``frozen`` was passed an undefined name, and the class name had been
    mangled even though the functions below construct and annotate ``Protein``.
    """

    # Cartesian coordinates of atoms in angstroms.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    """Parse a ProteinNet-format string into a ``Protein`` (backbone atoms N, CA, C only).

    Restored: the parameter and most locals had been renamed away from what the
    body referenced (``seq``, ``mask`` were NameErrors). Also fixes the
    upstream FIXME: residues outside the known restypes were assigned to a
    string index, which raises TypeError — replaced with slicing.
    """
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])
    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    # strings are immutable, so rebuild with the unknown residue as 'X'
                    seq = seq[:i] + "X" + seq[i + 1 :]
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]
    assert aatype is not None
    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot, chain_id: int = 0) -> List[str]:
    """Build PDB REMARK/PARENT header lines for the given protein and chain.

    Args:
        prot: protein providing ``remark``, ``parents`` and ``parents_chain_index``.
        chain_id: chain whose parents should be listed.
    """
    pdb_headers: List[str] = []
    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")
    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        # keep only the parents that belong to the requested chain
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]
    if parents is None or len(parents) == 0:
        parents = ["N/A"]
    pdb_headers.append(f"PARENT {' '.join(parents)}")
    return pdb_headers
def add_pdb_headers(prot, pdb_str: str) -> str:
    """Add pdb headers to an existing PDB string, respecting chain boundaries.

    Restored: duplicate parameter names and mangled locals; the bare
    ``_lowercase = 42`` was the ``parents_per_chain`` type annotation.
    """
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")
    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")
    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            # group the parents by the chain they belong to
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)
            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))
    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        # a TER record not followed by END marks the start of a new chain
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]
            out_pdb_lines.append(make_parent_line(chain_parents))
    return "\n".join(out_pdb_lines)
def to_pdb(prot) -> str:
    """Convert a ``Protein`` instance to a PDB string.

    Restored: mangled locals, the helper/attribute names ``res_atoa`` /
    ``restype_atoa`` (spellings of ``res_1to3`` / ``restype_1to3`` — the latter
    is what the residue_constants module actually provides), and the exact
    column spacing of the ATOM/TER records, which PDB requires.
    """
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        # Map a residue type index to its 3-letter code; unknown types become UNK.
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types
    pdb_lines: List[str] = []
    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index
    if np.any(aatype > residue_constants.restype_num):
        raise ValueError('Invalid aatypes.')
    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)
    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue
            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""
            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]
            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1
        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]
        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1
            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))
    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot) -> np.ndarray:
    """Return the standard atom mask for each residue type in ``prot.aatype``."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(
    features,
    result,
    b_factors=None,
    chain_index=None,
    remark=None,
    parents=None,
    parents_chain_index=None,
) -> Protein:
    """Assemble a ``Protein`` from model features and prediction results.

    Args:
        features: model inputs; must provide "aatype" and "residue_index".
        result: model outputs; must provide "final_atom_positions" and "final_atom_mask".
        b_factors: optional B-factors; defaults to zeros shaped like the atom mask.
        chain_index: optional chain indices for multi-chain predictions.
        remark: optional remark embedded in output PDB files.
        parents: optional template names.
        parents_chain_index: chain corresponding to each parent.
    """
    return Protein(
        aatype=features['aatype'],
        atom_positions=result['final_atom_positions'],
        atom_mask=result['final_atom_mask'],
        # PDB residue indices are 1-based
        residue_index=features['residue_index'] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask']),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
| 5 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowerCamelCase__ ( lowerCAmelCase):
    """
    Configuration class storing the architecture hyper-parameters of a WavLM model
    (defaults resemble microsoft/wavlm-base).

    Restored: the original ``__init__`` declared every parameter with the same
    name (a SyntaxError); names below are grounded by the right-hand sides of
    the attribute assignments. ``model_type`` is required by PretrainedConfig,
    and the final property is the feature-encoder stride ratio.
    """

    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # total stride of the convolutional feature encoder
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 5 | 1 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
UpperCAmelCase__ = logging.get_logger(__name__)
class lowerCamelCase__ ( ImageGPTImageProcessor):
    """
    Deprecated alias kept for backward compatibility; use ImageGPTImageProcessor instead.

    Restored: the base class had been mangled to an undefined name — the
    deprecation pattern (and the imported symbol) is the new processor class —
    and the warning category argument is ``FutureWarning``.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ImageGPTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 5 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase):
    """Slow integration test for the Flax Stable Diffusion 2 inpainting pipeline."""

    def tearDown(self):
        # clean up the VRAM after each test
        # (the original method was renamed to __A, so unittest never called it)
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png'
        )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png'
        )
        model_id = 'xvjiarui/stable-diffusion-2-inpainting'
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
            prompt, init_image, mask_image
        )
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)
        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )
        images = output.images.reshape(num_samples, 512, 512, 3)
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 5 | 1 |
from collections.abc import Generator
from math import sin
def UpperCAmelCase_ ( __snake_case ) -> bytes:
"""simple docstring"""
if len(__snake_case ) != 32:
raise ValueError('''Input must be of length 32''' )
_lowercase =b''''''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def UpperCAmelCase_ ( __snake_case ) -> bytes:
"""simple docstring"""
if i < 0:
raise ValueError('''Input must be non-negative''' )
_lowercase =format(__snake_case , '''08x''' )[-8:]
_lowercase =b''''''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' )
return little_endian_hex
def UpperCAmelCase_ ( __snake_case ) -> bytes:
"""simple docstring"""
_lowercase =b''''''
for char in message:
bit_string += format(__snake_case , '''08b''' ).encode('''utf-8''' )
_lowercase =format(len(__snake_case ) , '''064b''' ).encode('''utf-8''' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__snake_case ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def UpperCAmelCase_ ( __snake_case ) -> Generator[list[int], None, None]:
"""simple docstring"""
if len(__snake_case ) % 512 != 0:
raise ValueError('''Input must have length that\'s a multiple of 512''' )
for pos in range(0 , len(__snake_case ) , 512 ):
_lowercase =bit_string[pos : pos + 512]
_lowercase =[]
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def UpperCAmelCase_ ( __snake_case ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError('''Input must be non-negative''' )
_lowercase =format(__snake_case , '''032b''' )
_lowercase =''''''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__snake_case , 2 )
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> int:
"""simple docstring"""
return (a + b) % 2**32
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError('''Input must be non-negative''' )
if shift < 0:
raise ValueError('''Shift must be non-negative''' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def UpperCAmelCase_ ( __snake_case ) -> bytes:
    """Compute the MD5 digest of a message given as an iterable of byte values.

    Returns the 32-character hex digest as ``bytes``, with each of the four
    state words rendered little-endian (RFC 1321).
    NOTE(review): the helpers called here (``preprocess``, ``get_block_words``,
    ``not_aa``, ``sum_aa``, ``left_rotate_aa``, ``reformat_hex``) must be bound
    at module level under those names.
    """
    bit_string = preprocess(__snake_case )
    # Per-round additive constants: floor(2**32 * |sin(i + 1)|), RFC 1321.
    added_consts = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
    # Starting states (RFC 1321 magic initialisation vector).
    aa = 0x67452301
    ba = 0xEFCDAB89
    ca = 0x98BADCFE
    da = 0x10325476
    # Per-round left-rotation amounts, 16 per round group.
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-bit words
    for block_words in get_block_words(bit_string ):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64 ):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d ))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            # Rotate the four state registers; b absorbs the round result.
            a = d
            d = c
            c = b
            b = sum_aa(b , left_rotate_aa(f , shift_amounts[i] ) )
        # Add hashed chunk to running total
        aa = sum_aa(aa , a )
        ba = sum_aa(ba , b )
        ca = sum_aa(ca , c )
        da = sum_aa(da , d )
    digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
    return digest
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 5 |
import comet # From: unbabel-comet
import torch
import datasets
UpperCAmelCase__ = datasets.logging.get_logger(__name__)
UpperCAmelCase__ = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
UpperCAmelCase__ = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
UpperCAmelCase__ = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCamelCase__ ( datasets.Metric):
    """COMET machine-translation quality metric backed by ``unbabel-comet``.

    NOTE(review): all three methods were obfuscated to the single name ``__A``
    (upstream names: ``_info``, ``_download_and_prepare``, ``_compute``), so
    earlier definitions are shadowed on the class; confirm the intended names
    against the original ``datasets`` metric before relying on ``__A``.
    """

    def __A (self ):
        # Metric metadata: input schema and reference links.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''sources''': datasets.Value('''string''' , id='''sequence''' ),
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
                '''https://github.com/Unbabel/COMET''',
                '''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
                '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
            ] , )

    def __A (self , UpperCAmelCase ):
        """Download and load the COMET checkpoint onto ``self.scorer``
        (the ``dl_manager`` argument is unused)."""
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )

    def __A (self , sources , predictions , references , gpus=None , progress_bar=False ):
        """Score candidate translations; returns the mean system score and the
        per-sample scores."""
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'''src''': sources, '''mt''': predictions, '''ref''': references}
        # Transpose the column dict into a list of per-sample row dicts.
        data = [dict(zip(data , t ) ) for t in zip(*data.values() )]
        # NOTE(review): upstream unpacks ``scores, mean_score`` from predict();
        # confirm against the installed comet version's return convention.
        scores , mean_score = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar )
        return {"mean_score": mean_score, "scores": scores}
| 5 | 1 |
from __future__ import annotations
def UpperCAmelCase_ ( possible_board , diagonal_right_collisions , diagonal_left_collisions , boards , n , ) -> None:
    """Recursive backtracking step for the N-queens problem.

    ``possible_board[row] == col`` records the queen placed in each row so far;
    complete placements are rendered and appended to ``boards``.
    NOTE(review): the obfuscated source recursed via the undefined name
    ``depth_first_search``; the self-call below uses this function's own name.
    """
    row = len(possible_board )
    # If row is equal to the size of the board it means there are a queen in
    # each row in the current board (possible_board)
    if row == n:
        # Convert e.g. [1, 3, 0, 2] to
        # ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
        return
    # Try every column in the current row.
    for col in range(n ):
        # Reject vertical collisions (same column already used) and diagonal
        # collisions: 45 deg uses row - col = const, 135 deg uses row + col = const.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # Recurse with the new queen and its two diagonal signatures recorded.
        UpperCAmelCase_(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
def UpperCAmelCase_ ( __snake_case ) -> None:
    """Solve N-queens for a board of size ``__snake_case`` and print every
    solution followed by the solution count.

    NOTE(review): ``depth_first_search`` must be bound at module level (the
    recursive solver above was obfuscated to a different name) -- confirm.
    """
    boards = []
    depth_first_search([] , [] , [] , boards , __snake_case )
    # Print all the boards
    for board in boards:
        for column in board:
            print(column )
        print('''''' )
    print(len(boards ) , '''solutions were found.''' )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # ``UpperCAmelCase_`` resolves to the last function defined above (the
    # solver originally named ``n_queens_solution``).
    UpperCAmelCase_(4)
| 5 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowerCamelCase__ ( lowerCAmelCase , lowerCAmelCase):
    """Variance-preserving (VP) SDE scheduler for score-based generative
    models (Song et al., "Score-Based Generative Modeling through SDEs").

    NOTE(review): both methods below were emitted under the same obfuscated
    name ``__A`` (upstream: ``set_timesteps`` and ``step_pred``), so the
    second definition shadows the first on the class -- confirm intended names.
    """

    # Solver order of this scheduler.
    SCREAMING_SNAKE_CASE__ = 1

    @register_to_config
    def __init__(self , num_train_timesteps=2_0_0_0 , beta_min=0.1 , beta_max=2_0 , sampling_eps=1e-3 ) -> None:
        # ``register_to_config`` exposes the constructor arguments on
        # ``self.config``; only mutable sampling state lives on the instance.
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def __A (self , num_inference_steps , device = None ) -> None:
        """Build the continuous timestep grid from 1 down to ``sampling_eps``."""
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )

    def __A (self , score , x , t , generator=None ):
        """Run one reverse-SDE predictor step; returns ``(x, x_mean)`` where
        ``x_mean`` is the drift-only (noise-free) update."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        # Broadcast std over the sample dimensions before scaling the score.
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean

    def __len__(self ) -> int:
        return self.config.num_train_timesteps
| 5 | 1 |
import heapq as hq
import math
from collections.abc import Iterator
class lowerCamelCase__ :
    """Graph vertex for Prim's minimum-spanning-tree algorithm.

    Attributes:
        id: vertex identifier, stored as ``str``.
        key: current best edge weight connecting this vertex to the tree.
        pi: predecessor vertex in the spanning tree.
        neighbors: list of adjacent vertex objects.
        edges: mapping of neighbor id -> edge weight.

    NOTE(review): ``add_neighbor``/``add_edge`` restore the names used by the
    call sites in this module (the obfuscation had collapsed both to ``__A``).
    """

    def __init__(self , UpperCAmelCase ) -> None:
        self.id = str(UpperCAmelCase )
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex id: distance}

    def __lt__(self , UpperCAmelCase ) -> bool:
        # Ordering by key lets min()/heapq select the closest fringe vertex.
        return self.key < UpperCAmelCase.key

    def __repr__(self ) -> str:
        return self.id

    def add_neighbor(self , UpperCAmelCase ) -> None:
        """Record an adjacent vertex."""
        self.neighbors.append(UpperCAmelCase )

    def add_edge(self , UpperCAmelCase , weight ) -> None:
        """Record the weight of the edge to the given vertex."""
        self.edges[UpperCAmelCase.id] = weight
def UpperCAmelCase_ ( graph , a , b , weight ) -> None:
    """Connect vertices ``a`` and ``b`` (1-based indices into ``graph``) with
    an undirected edge of the given weight.

    NOTE(review): the obfuscated signature repeated one parameter name four
    times (a SyntaxError); names are restored from the body's references.
    """
    graph[a - 1].add_neighbor(graph[b - 1] )
    graph[b - 1].add_neighbor(graph[a - 1] )
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1] , weight )
    graph[b - 1].add_edge(graph[a - 1] , weight )
def UpperCAmelCase_ ( graph , root ) -> list:
    """Prim's MST algorithm, O(V^2) min-scan variant.

    Returns the tree edges as ``(child, parent)`` tuples of 1-based vertex ids
    (``graph[0]`` -- the root -- has no parent and is omitted).
    """
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        # Extract the fringe vertex with the smallest key (uses Vertex.__lt__).
        u = min(q )
        q.remove(u )
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1 , len(graph ) ):
        a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
    return a
def UpperCAmelCase_ ( graph , root ) -> Iterator[tuple]:
    """Prim's MST algorithm using a binary heap.

    Yields the tree edges as ``(child, parent)`` tuples of 1-based vertex ids.
    """
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph )
    hq.heapify(h )
    while h:
        u = hq.heappop(h )
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                # heapq has no decrease-key, so restore the invariant directly.
                hq.heapify(h )
    for i in range(1 , len(graph ) ):
        yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def UpperCAmelCase_ ( ) -> None:
    """Placeholder: the original body/doctest appears stripped by the source transform -- TODO confirm against upstream."""
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 5 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def UpperCAmelCase_ ( __snake_case ):
    """Build a ``MobileViTConfig`` for the given original model name, including
    the id2label/label2id maps downloaded from the ``huggingface/label-files``
    dataset repo."""
    config = MobileViTConfig()
    # size of the architecture
    if "mobilevit_s" in __snake_case:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in __snake_case:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in __snake_case:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        # The xx-small variant uses extra regularisation and a smaller MLP.
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0
    if __snake_case.startswith('''deeplabv3_''' ):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = '''pascal-voc-id2label.json'''
    else:
        config.num_labels = 1000
        filename = '''imagenet-1k-id2label.json'''
    repo_id = '''huggingface/label-files'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    # NOTE(review): the two attribute names above follow this file's obfuscated
    # spelling of id2label/label2id -- confirm against the real config fields.
    return config
def UpperCAmelCase_ ( name , base_model=False ) -> str:
    """Translate one original MobileViT state-dict key into the HF naming
    scheme. ``base_model=True`` skips the leading ``mobilevit.`` prefix.

    NOTE(review): the obfuscated signature repeated the same parameter name
    twice (a SyntaxError); names are restored from the body's references.
    """
    for i in range(1 , 6 ):
        if F"layer_{i}." in name:
            name = name.replace(F"layer_{i}." , F"encoder.layer.{i - 1}." )
    if "conv_1." in name:
        name = name.replace('''conv_1.''' , '''conv_stem.''' )
    if ".block." in name:
        name = name.replace('''.block.''' , '''.''' )
    if "exp_1x1" in name:
        name = name.replace('''exp_1x1''' , '''expand_1x1''' )
    if "red_1x1" in name:
        name = name.replace('''red_1x1''' , '''reduce_1x1''' )
    if ".local_rep.conv_3x3." in name:
        name = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' )
    if ".local_rep.conv_1x1." in name:
        name = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' )
    if ".norm." in name:
        name = name.replace('''.norm.''' , '''.normalization.''' )
    if ".conv." in name:
        name = name.replace('''.conv.''' , '''.convolution.''' )
    if ".conv_proj." in name:
        name = name.replace('''.conv_proj.''' , '''.conv_projection.''' )
    for i in range(0 , 2 ):
        for j in range(0 , 4 ):
            if F".{i}.{j}." in name:
                name = name.replace(F".{i}.{j}." , F".{i}.layer.{j}." )
    for i in range(2 , 6 ):
        for j in range(0 , 4 ):
            if F".{i}.{j}." in name:
                name = name.replace(F".{i}.{j}." , F".{i}." )
        if "expand_1x1" in name:
            name = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' )
        if "conv_3x3" in name:
            name = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' )
        if "reduce_1x1" in name:
            name = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' )
    for i in range(2 , 5 ):
        if F".global_rep.{i}.weight" in name:
            name = name.replace(F".global_rep.{i}.weight" , '''.layernorm.weight''' )
        if F".global_rep.{i}.bias" in name:
            name = name.replace(F".global_rep.{i}.bias" , '''.layernorm.bias''' )
    if ".global_rep." in name:
        name = name.replace('''.global_rep.''' , '''.transformer.''' )
    if ".pre_norm_mha.0." in name:
        name = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' )
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' )
    if ".pre_norm_ffn.0." in name:
        name = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' )
    if ".pre_norm_ffn.1." in name:
        name = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' )
    if ".pre_norm_ffn.4." in name:
        name = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' )
    if ".transformer." in name:
        name = name.replace('''.transformer.''' , '''.transformer.layer.''' )
    if ".aspp_layer." in name:
        name = name.replace('''.aspp_layer.''' , '''.''' )
    if ".aspp_pool." in name:
        name = name.replace('''.aspp_pool.''' , '''.''' )
    if "seg_head." in name:
        name = name.replace('''seg_head.''' , '''segmentation_head.''' )
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' )
    if "classifier.fc." in name:
        name = name.replace('''classifier.fc.''' , '''classifier.''' )
    elif (not base_model) and ("segmentation_head." not in name):
        name = '''mobilevit.''' + name
    return name
def UpperCAmelCase_ ( orig_state_dict , model , base_model=False ):
    """Rewrite an original MobileViT state dict in place so its keys match the
    HF ``model``; fused qkv projections are split into query/key/value.

    NOTE(review): ``rename_key`` must be bound at module level (the helper
    above was obfuscated to a different name) -- confirm.
    """
    if base_model:
        model_prefix = ''''''
    else:
        model_prefix = '''mobilevit.'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if key[:8] == "encoder.":
            key = key[8:]
        if "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[0][6:] ) - 1
            transformer_num = int(key_split[3] )
            layer = model.get_submodule(F"{model_prefix}encoder.layer.{layer_num}" )
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                F"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            # Split the fused qkv tensor into the three separate projections.
            if "weight" in key:
                orig_state_dict[prefix + '''query.weight'''] = val[:dim, :]
                orig_state_dict[prefix + '''key.weight'''] = val[dim : dim * 2, :]
                orig_state_dict[prefix + '''value.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[prefix + '''query.bias'''] = val[:dim]
                orig_state_dict[prefix + '''key.bias'''] = val[dim : dim * 2]
                orig_state_dict[prefix + '''value.bias'''] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , base_model=base_model )] = val
    return orig_state_dict
def UpperCAmelCase_ ( ):
    """Download and return the standard COCO cats test image used for sanity
    checks of the converted model."""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def UpperCAmelCase_ ( mobilevit_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    """Convert an original MobileViT checkpoint to the HF format, sanity-check
    its logits on a test image, save it, and optionally push it to the hub.

    NOTE(review): the sibling helpers (``get_mobilevit_config``,
    ``convert_state_dict``, ``prepare_img``) must be bound at module level.
    """
    config = get_mobilevit_config(mobilevit_name )
    # load original state_dict
    checkpoint = torch.load(checkpoint_path , map_location='''cpu''' )
    # load HF model
    if mobilevit_name.startswith('''deeplabv3_''' ):
        model = MobileViTForSemanticSegmentation(config ).eval()
    else:
        model = MobileViTForImageClassification(config ).eval()
    new_state_dict = convert_state_dict(checkpoint , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    outputs = model(**encoding )
    logits = outputs.logits
    if mobilevit_name.startswith('''deeplabv3_''' ):
        assert logits.shape == (1, 21, 32, 32)
        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ] )
        else:
            raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
        assert torch.allclose(logits[0, :3, :3, :3] , expected_logits , atol=1e-4 )
    else:
        assert logits.shape == (1, 1000)
        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241] )
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587] )
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653] )
        else:
            raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"Saving model {mobilevit_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            '''mobilevit_s''': '''mobilevit-small''',
            '''mobilevit_xs''': '''mobilevit-x-small''',
            '''mobilevit_xxs''': '''mobilevit-xx-small''',
            '''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
            '''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
            '''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
        }
        print('''Pushing to the hub...''' )
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name , organization='''apple''' )
        model.push_to_hub(model_name , organization='''apple''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--mobilevit_name''',
        default='''mobilevit_s''',
        type=str,
        help=(
            '''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
            ''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
        ),
    )
    parser.add_argument(
        '''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )
    args = parser.parse_args()
    # ``UpperCAmelCase_`` resolves to the last function defined above (the
    # checkpoint converter originally named convert_movilevit_checkpoint).
    UpperCAmelCase_(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 5 | 1 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase_ ( tf_checkpoint_path , config_file , pytorch_dump_path ) -> None:
    """Convert a TensorFlow LXMERT checkpoint into a PyTorch state dict.

    NOTE(review): the obfuscated signature repeated one parameter name three
    times (a SyntaxError); names are restored from the CLI flags below.
    """
    config = LxmertConfig.from_json_file(config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    model = LxmertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    # ``UpperCAmelCase_`` is the single converter function defined above
    # (originally convert_tf_checkpoint_to_pytorch).
    UpperCAmelCase_(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 5 |
import requests
from bsa import BeautifulSoup
def UpperCAmelCase_ ( __snake_case = "https://www.worldometers.info/coronavirus" ) -> dict:
    """Scrape worldometers and return {statistic label: value} for the global
    COVID-19 counters."""
    soup = BeautifulSoup(requests.get(__snake_case ).text , '''html.parser''' )
    keys = soup.findAll('''h1''' )
    values = soup.findAll('''div''' , {'''class''': '''maincounter-number'''} )
    keys += soup.findAll('''span''' , {'''class''': '''panel-title'''} )
    values += soup.findAll('''div''' , {'''class''': '''number-table-main'''} )
    # Pair each heading with its counter; strip surrounding whitespace.
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
    print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
    # ``UpperCAmelCase_`` is the scraper defined above (originally named
    # world_covid19_stats).
    for key, value in UpperCAmelCase_().items():
        print(f'''{key}\n{value}\n''')
| 5 | 1 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class lowerCamelCase__ ( lowerCAmelCase):
    """Reader that loads a :class:`Dataset` from a SQL query/table via the
    ``Sql`` builder.

    NOTE(review): the public entry point keeps this file's obfuscated name
    ``__A`` (upstream: ``read``) -- confirm before renaming.
    """

    def __init__(self , sql , con , features = None , cache_dir = None , keep_in_memory = False , **kwargs , ) -> None:
        super().__init__(features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , **kwargs )
        # The builder does the actual SQL -> Arrow conversion.
        self.builder = Sql(
            cache_dir=cache_dir , features=features , sql=sql , con=con , **kwargs , )

    def __A (self ):
        """Materialise the SQL source and return it as a ``train`` Dataset."""
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split='''train''' , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class lowerCamelCase__ :
    """Writer that streams a Dataset into a SQL table via ``pandas.to_sql``,
    optionally parallelised across processes.

    NOTE(review): the public entry point keeps this file's obfuscated name
    ``__A`` (upstream: ``write``); the private helpers are restored to the
    names their internal call sites use.
    """

    def __init__(self , dataset , name , con , batch_size = None , num_proc = None , **to_sql_kwargs , ) -> None:
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0." )
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def __A (self ) -> int:
        """Write the whole dataset; returns the number of rows written."""
        # ``sql``/``con`` must not leak into pandas' to_sql kwargs.
        _ = self.to_sql_kwargs.pop('''sql''' , None )
        _ = self.to_sql_kwargs.pop('''con''' , None )
        index = self.to_sql_kwargs.pop('''index''' , False )
        written = self._write(index=index , **self.to_sql_kwargs )
        return written

    def _batch_sql(self , args ):
        """Write one batch (``args`` is ``(offset, index, to_sql_kwargs)``)."""
        offset , index , to_sql_kwargs = args
        # Only the first batch may create/replace the table; later ones append.
        to_sql_kwargs = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name , self.con , index=index , **to_sql_kwargs )
        return num_rows or len(df )

    def _write(self , index , **to_sql_kwargs ) -> int:
        """Iterate over the dataset in batches, serially or with a pool."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
                written += self._batch_sql((offset, index, to_sql_kwargs) )
        else:
            num_rows , batch_size = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , num_rows , batch_size )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
                    written += num_rows
        return written
| 5 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
UpperCAmelCase__ = {
    '''config''': [
        '''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
        '''OnnxConfig''',
        '''OnnxConfigWithPast''',
        '''OnnxSeq2SeqConfigWithPast''',
        '''PatchingSpec''',
    ],
    '''convert''': ['''export''', '''validate_model_outputs'''],
    '''features''': ['''FeaturesManager'''],
    '''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
# The lazy-module call below references ``_import_structure``, but the
# original assignment bound only the obfuscated alias, leaving that name
# undefined at runtime.
_import_structure = UpperCAmelCase__
if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeqaSeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys
    # NOTE(review): upstream transformers assigns ``sys.modules[__name__]``
    # here so that attribute access triggers the lazy imports -- confirm the
    # intended assignment target.
    UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 | 1 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase__ :
def __init__(self , UpperCAmelCase , UpperCAmelCase=1_3 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=2 , UpperCAmelCase=9_9 , UpperCAmelCase=0 , UpperCAmelCase=3_2 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=5_1_2 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=2 , UpperCAmelCase=4 , UpperCAmelCase="last" , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=0 , ) -> List[Any]:
_lowercase =parent
_lowercase =batch_size
_lowercase =seq_length
_lowercase =is_training
_lowercase =use_input_lengths
_lowercase =use_token_type_ids
_lowercase =use_labels
_lowercase =gelu_activation
_lowercase =sinusoidal_embeddings
_lowercase =causal
_lowercase =asm
_lowercase =n_langs
_lowercase =vocab_size
_lowercase =n_special
_lowercase =hidden_size
_lowercase =num_hidden_layers
_lowercase =num_attention_heads
_lowercase =hidden_dropout_prob
_lowercase =attention_probs_dropout_prob
_lowercase =max_position_embeddings
_lowercase =type_sequence_label_size
_lowercase =initializer_range
_lowercase =num_labels
_lowercase =num_choices
_lowercase =summary_type
_lowercase =use_proj
_lowercase =scope
_lowercase =bos_token_id
def __A (self ) -> Dict:
_lowercase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase =random_attention_mask([self.batch_size, self.seq_length] )
_lowercase =None
if self.use_input_lengths:
_lowercase =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_lowercase =None
if self.use_token_type_ids:
_lowercase =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_lowercase =None
_lowercase =None
_lowercase =None
if self.use_labels:
_lowercase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase =ids_tensor([self.batch_size] , 2 ).float()
_lowercase =ids_tensor([self.batch_size] , self.num_choices )
_lowercase =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __A (self ) -> str:
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Dict:
        """Exercise the bare ``XLMModel`` and check the hidden-state shape.

        NOTE(review): all parameters share the name ``UpperCAmelCase``
        (duplicate argument names are a SyntaxError in Python) and results are
        assigned to ``_lowercase`` but read as ``model``/``result`` — the
        signature looks machine-mangled; restore the upstream names.
        """
        _lowercase =XLMModel(config=UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        _lowercase =model(UpperCAmelCase , lengths=UpperCAmelCase , langs=UpperCAmelCase )
        _lowercase =model(UpperCAmelCase , langs=UpperCAmelCase )
        _lowercase =model(UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Dict:
        """Check loss/logits shapes of ``XLMWithLMHeadModel``.

        NOTE(review): duplicate ``UpperCAmelCase`` parameters (SyntaxError) and
        ``_lowercase`` targets read as ``model``/``result`` — machine-mangled.
        """
        _lowercase =XLMWithLMHeadModel(UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        _lowercase =model(UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> List[Any]:
        """Check start/end logits of ``XLMForQuestionAnsweringSimple``.

        NOTE(review): duplicate ``UpperCAmelCase`` parameters (SyntaxError) and
        ``_lowercase`` targets read as ``model``/``outputs``/``result`` —
        machine-mangled.
        """
        _lowercase =XLMForQuestionAnsweringSimple(UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        _lowercase =model(UpperCAmelCase )
        _lowercase =model(UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase )
        _lowercase =outputs
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Optional[int]:
        """Check the beam-search QA head (``XLMForQuestionAnswering``), with and
        without labels, including top-k start/end log-prob shapes.

        NOTE(review): duplicate ``UpperCAmelCase`` parameters (SyntaxError) and
        ``_lowercase`` targets read as ``model``/``result``/
        ``result_with_labels`` — machine-mangled.
        """
        _lowercase =XLMForQuestionAnswering(UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        _lowercase =model(UpperCAmelCase )
        _lowercase =model(
            UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , cls_index=UpperCAmelCase , is_impossible=UpperCAmelCase , p_mask=UpperCAmelCase , )
        _lowercase =model(
            UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , cls_index=UpperCAmelCase , is_impossible=UpperCAmelCase , )
        # Labelled calls return only the total loss once converted to a tuple.
        ((_lowercase) , ) =result_with_labels.to_tuple()
        _lowercase =model(UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase )
        ((_lowercase) , ) =result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape , () )
        self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(
            result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> int:
        """Check loss/logits shapes of ``XLMForSequenceClassification``.

        NOTE(review): duplicate ``UpperCAmelCase`` parameters (SyntaxError) and
        ``_lowercase`` targets read as ``model``/``result`` — machine-mangled.
        """
        _lowercase =XLMForSequenceClassification(UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        _lowercase =model(UpperCAmelCase )
        _lowercase =model(UpperCAmelCase , labels=UpperCAmelCase )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Dict:
        """Check logits shape of ``XLMForTokenClassification``.

        NOTE(review): duplicate ``UpperCAmelCase`` parameters (SyntaxError);
        the first ``_lowercase`` line presumably set ``config.num_labels``
        upstream — TODO confirm. ``model``/``result`` are read but never bound.
        """
        _lowercase =self.num_labels
        _lowercase =XLMForTokenClassification(UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        _lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> List[str]:
        """Check logits shape of ``XLMForMultipleChoice`` by expanding each
        input along a new num_choices axis.

        NOTE(review): duplicate ``UpperCAmelCase`` parameters (SyntaxError) and
        ``_lowercase`` targets read as ``model``/``input_ids``/
        ``token_type_ids``/``input_mask``/``result`` — machine-mangled.
        """
        _lowercase =self.num_choices
        _lowercase =XLMForMultipleChoice(config=UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        # (batch, seq) -> (batch, num_choices, seq) via unsqueeze + expand.
        _lowercase =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowercase =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowercase =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        _lowercase =model(
            UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def __A (self ) -> Optional[int]:
        """Repackage ``prepare_config_and_inputs()`` into ``(config, inputs_dict)``
        for the common model tests.

        NOTE(review): the tuple unpacking binds every element to ``_lowercase``
        while the dict below reads ``input_ids``/``token_type_ids``/
        ``input_lengths`` — NameError as written; targets look machine-mangled.
        """
        _lowercase =self.prepare_config_and_inputs()
        (
            (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) ,
        ) =config_and_inputs
        _lowercase ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
        return config, inputs_dict
@require_torch
class lowerCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase):
    """Model/pipeline/generation test wiring for XLM.

    Registers the XLM model heads with the common model-tester, pipeline and
    generation mixins, and overrides the generate() attention/hidden-state
    shape checks (XLM appends one dummy PAD token per generated step).

    NOTE(review): several method signatures reuse ``UpperCAmelCase`` for every
    parameter (duplicate argument names are a SyntaxError in Python) and
    locals are assigned to ``_lowercase`` but read under their upstream names
    — the file looks machine-mangled; restore names before running.
    """
    # All model heads exercised by the common model tests.
    SCREAMING_SNAKE_CASE__ = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    SCREAMING_SNAKE_CASE__ = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    ) # TODO (PVP): Check other models whether language generation is also applicable
    # Pipeline-task -> model-class mapping for the pipeline mixin.
    SCREAMING_SNAKE_CASE__ = (
        {
            '''feature-extraction''': XLMModel,
            '''fill-mask''': XLMWithLMHeadModel,
            '''question-answering''': XLMForQuestionAnsweringSimple,
            '''text-classification''': XLMForSequenceClassification,
            '''text-generation''': XLMWithLMHeadModel,
            '''token-classification''': XLMForTokenClassification,
            '''zero-shot''': XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Skip QA pipeline tests with slow tokenizers (known to fail).
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any:
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('''Fast''' )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    # Add zeroed cls_index / p_mask labels required by the beam-search QA head.
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> List[Any]:
        _lowercase =super()._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                _lowercase =torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase )
                _lowercase =torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase )
        return inputs_dict
    def __A (self ) -> List[Any]:
        _lowercase =XLMModelTester(self )
        _lowercase =ConfigTester(self , config_class=UpperCAmelCase , emb_dim=3_7 )
    def __A (self ) -> List[Any]:
        self.config_tester.run_common_tests()
    # One thin wrapper per head: build inputs, delegate to the model tester.
    def __A (self ) -> List[Any]:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*UpperCAmelCase )
    def __A (self ) -> str:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*UpperCAmelCase )
    def __A (self ) -> Any:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*UpperCAmelCase )
    def __A (self ) -> int:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*UpperCAmelCase )
    def __A (self ) -> Union[str, Any]:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*UpperCAmelCase )
    def __A (self ) -> Tuple:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*UpperCAmelCase )
    def __A (self ) -> Tuple:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCAmelCase )
    # Per-step attention shape check for generate(): one extra token per step.
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=1 ) -> str:
        self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
        self.assertListEqual(
            [isinstance(UpperCAmelCase , UpperCAmelCase ) for iter_attentions in attentions] , [True] * len(UpperCAmelCase ) )
        self.assertEqual(len(UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_attentions in enumerate(UpperCAmelCase ):
            # adds PAD dummy token
            _lowercase =min_length + idx + 1
            _lowercase =min_length + idx + 1
            _lowercase =(
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(UpperCAmelCase ) )
    # Per-step hidden-state shape check for generate().
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=1 ) -> int:
        self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
        self.assertListEqual(
            [isinstance(UpperCAmelCase , UpperCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(UpperCAmelCase ) , )
        self.assertEqual(len(UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_hidden_states in enumerate(UpperCAmelCase ):
            # adds PAD dummy token
            _lowercase =min_length + idx + 1
            _lowercase =(batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(UpperCAmelCase ) , )
        # NOTE(review): stray ``pass`` — dead statement, safe to remove.
        pass
    @slow
    def __A (self ) -> Union[str, Any]:
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowercase =XLMModel.from_pretrained(UpperCAmelCase )
            self.assertIsNotNone(UpperCAmelCase )
@require_torch
class lowerCamelCase__ ( unittest.TestCase):
    """Slow integration test: greedy ``generate()`` with ``xlm-mlm-en-2048``.

    NOTE(review): ``model.to(UpperCAmelCase )`` / ``device=UpperCAmelCase`` /
    ``do_sample=UpperCAmelCase`` reference an undefined name inside this
    zero-argument method (machine-mangled; presumably ``torch_device`` and
    ``False`` upstream — TODO confirm).
    """
    @slow
    def __A (self ) -> Union[str, Any]:
        _lowercase =XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
        model.to(UpperCAmelCase )
        _lowercase =torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=UpperCAmelCase ) # the president
        # Greedy decoding degenerates into repeating the prompt tokens.
        _lowercase =[
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
        ] # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        _lowercase =model.generate(UpperCAmelCase , do_sample=UpperCAmelCase )
        self.assertListEqual(output_ids[0].cpu().numpy().tolist() , UpperCAmelCase )
| 5 |
def UpperCAmelCase_ ( __snake_case ) -> str:
    """Return the longest palindromic substring of ``__snake_case``.

    Implements Manacher's algorithm in O(n): the input is interleaved with
    ``"|"`` so even-length palindromes get a single centre character, then a
    palindromic radius is grown around every position, reusing the mirrored
    result inside the right-most palindrome found so far.

    Fixes vs. the previous revision:
    * every local was assigned to a throwaway name (``_lowercase``) while the
      algorithm read the real names, so any call raised NameError;
    * an empty input raised IndexError (``input_string[-1]``); now returns "".
    """
    if not __snake_case:
        return ""
    # "aba" -> "a|b|a": every palindrome (odd or even length) gets one centre.
    new_input_string = "|".join(__snake_case)
    # [l, r] bounds the right-most palindromic substring found so far.
    l, r = 0, 0  # noqa: E741
    # length[i] = length of the longest palindrome centred at i.
    length = [1] * len(new_input_string)
    max_length = 0
    start = 0
    for j in range(len(new_input_string)):
        # Mirror trick: seed the radius from the mirrored centre l + r - j,
        # clamped so it never reaches past the known boundary r.
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # This palindrome reaches past r: adopt it as the new right-most one.
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # Strip the "|" separators from the winning slice.
    chosen = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    return "".join(ch for ch in chosen if ch != "|")
if __name__ == "__main__":
    # Run the module's embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
| 5 | 1 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase__ ( lowerCAmelCase , unittest.TestCase):
    """Tokenizer test suite for ProphetNet (BERT-style WordPiece vocabulary).

    Covers BasicTokenizer casing/accent-stripping options, WordpieceTokenizer
    lookup, the character-class helpers (``_is_whitespace``/``_is_control``/
    ``_is_punctuation``) and slow round-trips against
    ``microsoft/prophetnet-large-uncased``.

    NOTE(review): several locals are assigned to ``_lowercase`` but read under
    their upstream names (e.g. ``vocab_tokens`` in ``setUp``) — NameError as
    written; the assignment targets look machine-mangled.
    """
    SCREAMING_SNAKE_CASE__ = ProphetNetTokenizer
    SCREAMING_SNAKE_CASE__ = False
    # Write a tiny WordPiece vocab file into the test tmpdir.
    def __A (self ) -> Optional[int]:
        super().setUp()
        _lowercase =[
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        _lowercase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def __A (self , UpperCAmelCase ) -> int:
        _lowercase ='''UNwant\u00E9d,running'''
        _lowercase ='''unwanted, running'''
        return input_text, output_text
    # Full tokenizer: tokenize + convert_tokens_to_ids round-trip.
    def __A (self ) -> Optional[int]:
        _lowercase =self.tokenizer_class(self.vocab_file )
        _lowercase =tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(UpperCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [9, 6, 7, 1_2, 1_0, 1_1] )
    # BasicTokenizer behaviour under the various casing / accent options.
    def __A (self ) -> Union[str, Any]:
        _lowercase =BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
    def __A (self ) -> List[str]:
        _lowercase =BasicTokenizer(do_lower_case=UpperCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how  \n Are yoU?  ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def __A (self ) -> Dict:
        _lowercase =BasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
    def __A (self ) -> List[str]:
        _lowercase =BasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def __A (self ) -> Optional[int]:
        _lowercase =BasicTokenizer(do_lower_case=UpperCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def __A (self ) -> str:
        _lowercase =BasicTokenizer(do_lower_case=UpperCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how  \n Are yoU?  ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def __A (self ) -> Union[str, Any]:
        _lowercase =BasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def __A (self ) -> int:
        _lowercase =BasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how  \n Are yoU?  ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def __A (self ) -> List[Any]:
        _lowercase =BasicTokenizer(do_lower_case=UpperCAmelCase , never_split=['''[UNK]'''] )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how  \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
    # WordpieceTokenizer: greedy longest-match-first lookup against the vocab.
    def __A (self ) -> List[str]:
        _lowercase =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
        _lowercase ={}
        for i, token in enumerate(UpperCAmelCase ):
            _lowercase =i
        _lowercase =WordpieceTokenizer(vocab=UpperCAmelCase , unk_token='''[UNK]''' )
        self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
        self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
    # Batch-encode with padding against the released checkpoint (slow, torch).
    @require_torch
    def __A (self ) -> Union[str, Any]:
        _lowercase =self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
        _lowercase =['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        _lowercase =[1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2]
        _lowercase =tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors='''pt''' )
        self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
        _lowercase =list(batch.input_ids.numpy()[0] )
        self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
        self.assertEqual((2, 9) , batch.input_ids.shape )
        self.assertEqual((2, 9) , batch.attention_mask.shape )
    # Character-class helper predicates.
    def __A (self ) -> int:
        self.assertTrue(_is_whitespace(''' ''' ) )
        self.assertTrue(_is_whitespace('''\t''' ) )
        self.assertTrue(_is_whitespace('''\r''' ) )
        self.assertTrue(_is_whitespace('''\n''' ) )
        self.assertTrue(_is_whitespace('''\u00A0''' ) )
        self.assertFalse(_is_whitespace('''A''' ) )
        self.assertFalse(_is_whitespace('''-''' ) )
    def __A (self ) -> Optional[int]:
        self.assertTrue(_is_control('''\u0005''' ) )
        self.assertFalse(_is_control('''A''' ) )
        self.assertFalse(_is_control(''' ''' ) )
        self.assertFalse(_is_control('''\t''' ) )
        self.assertFalse(_is_control('''\r''' ) )
    def __A (self ) -> Any:
        self.assertTrue(_is_punctuation('''-''' ) )
        self.assertTrue(_is_punctuation('''$''' ) )
        self.assertTrue(_is_punctuation('''`''' ) )
        self.assertTrue(_is_punctuation('''.''' ) )
        self.assertFalse(_is_punctuation('''A''' ) )
        self.assertFalse(_is_punctuation(''' ''' ) )
    # build_inputs_with_special_tokens: single sequence and sequence pair.
    @slow
    def __A (self ) -> int:
        _lowercase =self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
        _lowercase =tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase )
        _lowercase =tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase )
        _lowercase =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
        _lowercase =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
        assert encoded_sentence == text + [1_0_2]
        assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
| 5 |
from math import isqrt
def UpperCAmelCase_ ( __snake_case ) -> list[int]:
    """Return all primes strictly below ``__snake_case`` (sieve of Eratosthenes).

    Fixes vs. the previous revision:
    * the sieve list was assigned to ``_lowercase`` but read as ``is_prime``
      (NameError on any call);
    * the inner marking loop stepped by the limit instead of the prime, so
      almost no composites were crossed off;
    * limits below 1 crashed inside ``isqrt``; now they return [].
    """
    if __snake_case < 2:
        return []
    is_prime = [True] * __snake_case
    # Only candidates up to sqrt(limit) can mark a new composite.
    for i in range(2, isqrt(__snake_case - 1) + 1):
        if is_prime[i]:
            # Multiples below i*i were already marked by smaller primes.
            for j in range(i * i, __snake_case, i):
                is_prime[j] = False
    return [i for i in range(2, __snake_case) if is_prime[i]]
def UpperCAmelCase_ ( __snake_case = 10**8 ) -> int:
    """Project Euler 187: count composites below ``__snake_case`` that are the
    product of exactly two (not necessarily distinct) primes.

    Fixes vs. the previous revision: locals were assigned to ``_lowercase``
    but read under their real names (``prime_numbers``/``left``/``right``/
    ``semiprimes_count``), and the limit was read as ``max_number`` — every
    call raised NameError.

    NOTE(review): calls ``calculate_prime_numbers``, which is not defined
    under that name in this file (the sieve above is named
    ``UpperCAmelCase_``) — confirm the intended helper name.
    """
    # The smaller factor p of p*q < N satisfies p <= N / q <= N / 2 (q >= 2),
    # so primes up to N // 2 suffice.
    prime_numbers = calculate_prime_numbers(__snake_case // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    # Two pointers: for each smallest factor prime_numbers[left], shrink
    # ``right`` until the product drops below the limit; every prime in
    # [left, right] then pairs validly with prime_numbers[left].
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= __snake_case:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
    # Print the Project Euler 187 answer when run as a script.
    # NOTE(review): ``solution`` is not defined under that name in this file
    # (the solver above is named ``UpperCAmelCase_``) — NameError as written.
    print(f'''{solution() = }''')
| 5 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Public API of the sub-package, keyed by submodule; consumed by _LazyModule.
# NOTE(review): each later assignment rebinds ``UpperCAmelCase__`` instead of
# extending the dict, and ``_import_structure`` (read at the bottom) is never
# defined — upstream appends to ``_import_structure[...]``; looks mangled.
UpperCAmelCase__ = {
    '''configuration_efficientnet''': [
        '''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''EfficientNetConfig''',
        '''EfficientNetOnnxConfig''',
    ]
}
# Image processor needs the vision extras; register only when available.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase__ = ['''EfficientNetImageProcessor''']
# Modeling code needs torch; register only when available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase__ = [
        '''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''EfficientNetForImageClassification''',
        '''EfficientNetModel''',
        '''EfficientNetPreTrainedModel''',
    ]
# Static type checkers see the real imports; at runtime a lazy module is
# installed instead, so heavy dependencies load on first attribute access.
if TYPE_CHECKING:
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )
else:
    import sys
    # Replace this module in sys.modules with a lazily-resolving proxy.
    UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 5 |
# Character -> Morse symbol table (letters, digits, punctuation); " " maps to
# the word separator "/". "!" is included although not in the ITU-R table.
# NOTE(review): the table is bound to ``UpperCAmelCase__`` but read below (and
# by the codec functions) as ``MORSE_CODE_DICT`` — NameError as written; the
# binding looks machine-mangled.
UpperCAmelCase__ = {
    '''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
    '''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
    '''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
    '''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
    '''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
    '''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
    ''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
    '''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
    '''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
# Reverse mapping (Morse symbol -> character) used by the decoder.
UpperCAmelCase__ = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCAmelCase_ ( __snake_case ) -> str:
    """Encode a plaintext message as Morse code, symbols separated by spaces.

    Fix vs. the previous revision: the body iterated ``message`` although the
    parameter is named ``__snake_case`` (NameError on any call).

    Raises KeyError for characters absent from ``MORSE_CODE_DICT``.
    """
    return " ".join(MORSE_CODE_DICT[char] for char in __snake_case.upper())
def UpperCAmelCase_ ( __snake_case ) -> str:
    """Decode a space-separated Morse string back to (upper-case) plaintext.

    Fix vs. the previous revision: the body read ``message`` instead of the
    actual parameter ``__snake_case`` (NameError on any call).
    """
    return "".join(REVERSE_DICT[char] for char in __snake_case.split())
def UpperCAmelCase_ ( ) -> None:
    """Demo: print a message, its Morse encoding, and the round-trip decode.

    Fix vs. the previous revision: the body printed ``__snake_case``, which is
    not defined inside this zero-argument function (NameError), and every
    intermediate was assigned to the throwaway name ``_lowercase``.

    NOTE(review): ``encrypt``/``decrypt`` are not defined under those names in
    this file (the codec functions above are both named ``UpperCAmelCase_``);
    confirm the intended names before running.
    """
    message = '''Morse code here!'''
    print(message)
    encoded = encrypt(message)
    print(encoded)
    decoded = decrypt(encoded)
    print(decoded)
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined under that name in this file (the
    # demo above is named ``UpperCAmelCase_``) — NameError as written.
    main()
| 5 | 1 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase__ ( lowerCAmelCase , unittest.TestCase):
    """Tokenizer test suite for GPT-2 byte-level BPE (slow + fast classes).

    Exercises a tiny hand-written vocab/merges pair, slow/fast parity,
    padding behaviour (GPT-2 ships without a pad token), a custom BOS token,
    and the special-tokens mask.

    NOTE(review): throughout this class locals are assigned to ``_lowercase``
    but later read under their upstream names (``vocab_tokens``, ``merges``,
    ``tokens``, ``tokenizer_r``, ``out_s`` ...) — NameError as written; the
    assignment targets look machine-mangled.
    """
    SCREAMING_SNAKE_CASE__ = GPTaTokenizer
    SCREAMING_SNAKE_CASE__ = GPTaTokenizerFast
    SCREAMING_SNAKE_CASE__ = True
    SCREAMING_SNAKE_CASE__ = {'''add_prefix_space''': True}
    SCREAMING_SNAKE_CASE__ = False
    # Write a minimal BPE vocab + merges file into the test tmpdir.
    def __A (self ) -> Dict:
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        _lowercase =[
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
            '''<|endoftext|>''',
        ]
        _lowercase =dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
        _lowercase =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        _lowercase ={'''unk_token''': '''<unk>'''}
        _lowercase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        _lowercase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(UpperCAmelCase ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(UpperCAmelCase ) )
    def __A (self , **UpperCAmelCase ) -> str:
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
    def __A (self , **UpperCAmelCase ) -> Dict:
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase )
    def __A (self , UpperCAmelCase ) -> Tuple:
        _lowercase ='''lower newer'''
        _lowercase ='''lower newer'''
        return input_text, output_text
    # Slow tokenizer: tokenize + convert_tokens_to_ids against the tiny vocab.
    def __A (self ) -> List[Any]:
        _lowercase =GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        _lowercase ='''lower newer'''
        _lowercase =['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        _lowercase =tokenizer.tokenize(UpperCAmelCase , add_prefix_space=UpperCAmelCase )
        self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
        _lowercase =tokens + [tokenizer.unk_token]
        _lowercase =[1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , UpperCAmelCase )
    # Slow vs. fast tokenizer parity (tokens, ids, special tokens, unk).
    def __A (self ) -> List[str]:
        if not self.test_rust_tokenizer:
            return
        _lowercase =self.get_tokenizer()
        _lowercase =self.get_rust_tokenizer(add_prefix_space=UpperCAmelCase )
        _lowercase ='''lower newer'''
        # Testing tokenization
        _lowercase =tokenizer.tokenize(UpperCAmelCase , add_prefix_space=UpperCAmelCase )
        _lowercase =rust_tokenizer.tokenize(UpperCAmelCase )
        self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
        # Testing conversion to ids without special tokens
        _lowercase =tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase , add_prefix_space=UpperCAmelCase )
        _lowercase =rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
        self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
        # Testing conversion to ids with special tokens
        _lowercase =self.get_rust_tokenizer(add_prefix_space=UpperCAmelCase )
        _lowercase =tokenizer.encode(UpperCAmelCase , add_prefix_space=UpperCAmelCase )
        _lowercase =rust_tokenizer.encode(UpperCAmelCase )
        self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
        # Testing the unknown token
        _lowercase =tokens + [rust_tokenizer.unk_token]
        _lowercase =[1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , UpperCAmelCase )
    def __A (self , *UpperCAmelCase , **UpperCAmelCase ) -> Dict:
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    # Without a pad token, padding='max_length' must raise for every API shape.
    def __A (self , UpperCAmelCase=1_5 ) -> Dict:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                _lowercase =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
                # Simple input
                _lowercase ='''This is a simple input'''
                _lowercase =['''This is a simple input 1''', '''This is a simple input 2''']
                _lowercase =('''This is a simple input''', '''This is a pair''')
                _lowercase =[
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(UpperCAmelCase , tokenizer_r.encode , UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' )
                # Simple input
                self.assertRaises(UpperCAmelCase , tokenizer_r.encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    UpperCAmelCase , tokenizer_r.batch_encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' , )
                # Pair input
                self.assertRaises(UpperCAmelCase , tokenizer_r.encode , UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' )
                # Pair input
                self.assertRaises(UpperCAmelCase , tokenizer_r.encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    UpperCAmelCase , tokenizer_r.batch_encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding='''max_length''' , )
    # With an explicit pad token on the slow tokenizer, both max_length and
    # automatic (longest) padding must produce pad ids / zeroed attention.
    def __A (self ) -> Dict:
        _lowercase =GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
        # Simple input
        _lowercase ='''This is a simple input'''
        _lowercase =['''This is a simple input looooooooong''', '''This is a simple input''']
        _lowercase =('''This is a simple input''', '''This is a pair''')
        _lowercase =[
            ('''This is a simple input loooooong''', '''This is a simple input'''),
            ('''This is a simple pair loooooong''', '''This is a simple pair'''),
        ]
        _lowercase =tokenizer.pad_token_id
        _lowercase =tokenizer(UpperCAmelCase , padding='''max_length''' , max_length=3_0 , return_tensors='''np''' )
        _lowercase =tokenizer(UpperCAmelCase , padding=UpperCAmelCase , truncate=UpperCAmelCase , return_tensors='''np''' )
        _lowercase =tokenizer(*UpperCAmelCase , padding='''max_length''' , max_length=6_0 , return_tensors='''np''' )
        _lowercase =tokenizer(UpperCAmelCase , padding=UpperCAmelCase , truncate=UpperCAmelCase , return_tensors='''np''' )
        # s
        # test single string max_length padding
        self.assertEqual(out_s['''input_ids'''].shape[-1] , 3_0 )
        self.assertTrue(pad_token_id in out_s['''input_ids'''] )
        self.assertTrue(0 in out_s['''attention_mask'''] )
        # s2
        # test automatic padding
        self.assertEqual(out_sa['''input_ids'''].shape[-1] , 3_3 )
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
        self.assertFalse(0 in out_sa['''attention_mask'''][0] )
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
        self.assertTrue(0 in out_sa['''attention_mask'''][1] )
        # p
        # test single pair max_length padding
        self.assertEqual(out_p['''input_ids'''].shape[-1] , 6_0 )
        self.assertTrue(pad_token_id in out_p['''input_ids'''] )
        self.assertTrue(0 in out_p['''attention_mask'''] )
        # p2
        # test automatic padding pair
        self.assertEqual(out_pa['''input_ids'''].shape[-1] , 5_2 )
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
        self.assertFalse(0 in out_pa['''attention_mask'''][0] )
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
        self.assertTrue(0 in out_pa['''attention_mask'''][1] )
    # A custom BOS token with add_bos_token=True must lead every encoding.
    def __A (self ) -> int:
        _lowercase ='''$$$'''
        _lowercase =GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=UpperCAmelCase , add_bos_token=UpperCAmelCase )
        _lowercase ='''This is a simple input'''
        _lowercase =['''This is a simple input 1''', '''This is a simple input 2''']
        _lowercase =tokenizer.bos_token_id
        _lowercase =tokenizer(UpperCAmelCase )
        _lowercase =tokenizer(UpperCAmelCase )
        self.assertEqual(out_s.input_ids[0] , UpperCAmelCase )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        _lowercase =tokenizer.decode(out_s.input_ids )
        _lowercase =tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0] , UpperCAmelCase )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
    def __A (self ) -> Union[str, Any]:
        pass
    # special_tokens_mask must line up 1:1 with the encoded ids.
    def __A (self ) -> Optional[Any]:
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        _lowercase =[self.get_tokenizer(do_lower_case=UpperCAmelCase , add_bos_token=UpperCAmelCase )]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                _lowercase ='''Encode this.'''
                _lowercase ='''This one too please.'''
                _lowercase =tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
                encoded_sequence += tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
                _lowercase =tokenizer.encode_plus(
                    UpperCAmelCase , UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , )
                _lowercase =encoded_sequence_dict['''input_ids''']
                _lowercase =encoded_sequence_dict['''special_tokens_mask''']
                self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
                _lowercase =[
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(UpperCAmelCase )
                ]
                _lowercase =[x for x in filtered_sequence if x is not None]
                self.assertEqual(UpperCAmelCase , UpperCAmelCase )
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase):
    """Regression tests for OPT tokenizer serialization and BOS handling.

    More context:
    https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
    https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
    https://github.com/huggingface/transformers/pull/17088#discussion_r871246439

    NOTE(review): all three methods were mangled to the same name ``__A`` so
    only the last one survived class creation; distinct test names restored.
    """

    def test_serialize_deserialize_fast_opt(self):
        # A fast tokenizer converted from slow must tokenize identically
        # before and after a save/load round-trip.
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")
        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        # Same as above
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]
        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        # We changed the bos token
        self.assertEqual(input_ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [31957, 250, 1345, 9, 10, 4758])
| 5 |
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """
    Viterbi algorithm: return the most likely sequence of hidden states that
    produced the given sequence of observations.

    >>> observations = ["normal", "cold", "dizzy"]
    >>> states = ["Healthy", "Fever"]
    >>> start_p = {"Healthy": 0.6, "Fever": 0.4}
    >>> trans_p = {
    ...     "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    ...     "Fever": {"Healthy": 0.4, "Fever": 0.6},
    ... }
    >>> emit_p = {
    ...     "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    ...     "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    ... }
    >>> viterbi(observations, states, start_p, trans_p, emit_p)
    ['Healthy', 'Healthy', 'Fever']
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate every argument of :func:`viterbi`; raise ValueError on misuse."""
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Raise ValueError if any argument is empty/falsy."""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    """Both spaces must be lists of strings."""
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    """Raise ValueError unless ``_object`` is a list of strings."""
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Initial probabilities are a flat str->float dict; the others are nested."""
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    """Raise ValueError unless ``_object`` is a dict of str -> dict[str, float]."""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    """Raise ValueError unless ``_object`` is a dict with str keys and ``value_type`` values."""
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 5 | 1 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    """
    Isolate the decimal (fractional) part of ``number``.

    If ``digit_amount`` > 0 the result is rounded to that many decimal places,
    otherwise the raw floating-point remainder is returned.

    NOTE: the duplicate/mangled parameter names were a SyntaxError and the
    ``__main__`` block below called ``decimal_isolate``, which was undefined;
    both restored here.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
| 5 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both module constants were mangled to the same name
# ``UpperCAmelCase__`` (the map shadowed the logger), while code below calls
# ``logger.info``/``logger.warning``; names restored.
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    """Configuration for ESM models, optionally carrying an ESMFold folding head.

    NOTE(review): the original ``__init__`` had nineteen parameters all named
    ``UpperCAmelCase`` (a SyntaxError) and an undefined base class; parameter
    names restored from the attribute assignments in the body, the base from
    the ``PretrainedConfig`` import at the top of the file.
    """

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            # Folding models need an ESMFold sub-config and an explicit vocabulary.
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested ESMFold config."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """Hyper-parameters of the ESMFold head attached to an ESM encoder.

    NOTE(review): all ten fields were mangled to the same name (collapsing to a
    single dataclass field) and both methods to ``__A``; names restored so the
    defaults line up with the original declaration order.
    """

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Accept a missing trunk (use defaults) or a plain dict (deserialize it).
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested trunk config."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    """Hyper-parameters of the ESMFold folding trunk.

    Validates on construction that the state dims are compatible with the
    attention head widths.
    """

    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        # Accept a missing structure module (defaults) or a plain dict (deserialize).
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # Bug fix: the original compared each dim against *itself* (always 0),
        # making these divisibility checks vacuous. Each state dim must be a
        # round multiple of the corresponding head width.
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested structure-module config."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    """Hyper-parameters of the ESMFold structure module (IPA + angle resnet).

    NOTE(review): the fifteen fields were all mangled to the same name; names
    restored to match the declaration order of the original defaults.
    """

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        """Serialize to a plain dict."""
        return asdict(self)
def get_default_vocab_list() -> tuple:
    """Return the default ESM-2 vocabulary (33 tokens).

    Restored name: ``EsmConfig.__init__`` above calls ``get_default_vocab_list()``
    but the def had been mangled to ``UpperCAmelCase_``.
    """
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
| 5 | 1 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    """Builds tiny BertGeneration configs/inputs and runs shape checks.

    NOTE(review): the mangled ``__init__`` assigned every argument to a local
    ``_lowercase`` instead of ``self.<attr>`` and every method was named
    ``__A``; names restored from the call sites in the test class below
    (``prepare_config_and_inputs`` etc.).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/mask/labels plus a tiny config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        """Same as above, plus cross-attention inputs and a decoder config."""
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        config.is_decoder = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/generation/pipeline test-suite entry point for BertGeneration.

    NOTE(review): the mangled bases (``lowerCAmelCase``) and attribute names
    restored from the mixins imported at the top of this file and the
    ``PretrainedConfig``-style test conventions.
    """

    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        """Check encoder hidden states against a recorded reference slice."""
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        """Check decoder logits against a recorded reference slice."""
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 5 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
# Markers that end a generated function body: generation is truncated as soon
# as the model starts a new top-level block.
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset.

    Multiple copies of the same prompt are sent sequentially; see
    ``complete_code`` for how the copies are regrouped per task.

    NOTE(review): the original base class name was mangled and undefined;
    restored to the ``IterableDataset`` imported at the top of this file.
    """

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` that checks whether every generated function
    in the batch is complete (i.e. contains one of the EOF marker strings).

    NOTE(review): the mangled base class was undefined; restored to the
    ``StoppingCriteria`` imported at the top of this file.
    """

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length  # prompt length; only the continuation is inspected
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Return True iff every sequence contains an end-of-function marker."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(code, eof_strings=None):
    """Drop the last, possibly-unfinished block from generated ``code``.

    Splits on the EOF markers (keeping them via the capturing group); the
    final two pieces are the last marker and whatever follows it, so dropping
    them truncates the generation at the completed function.

    ``eof_strings`` defaults to the module-level ``EOF_STRINGS`` list; it is a
    parameter so callers can supply custom markers.
    """
    if eof_strings is None:
        eof_strings = EOF_STRINGS
    string_list = re.split("(%s)" % "|".join(eof_strings), code)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate ``batch_size`` candidate completions per HumanEval task.

    Returns a list of length ``n_tasks``; entry ``i`` is the list of decoded,
    EOF-truncated code strings generated for task ``i``.
    """
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            # The stopping criterion must only look at the continuation, so it
            # is told where the prompt ends for this batch.
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    """Run HumanEval: generate completions with the given checkpoint and score
    them with the ``code_eval`` metric (pass@k).

    NOTE(review): the ``os.environ`` assignments and most locals had been
    destroyed by mechanical renaming; restored from the surrounding usage.
    """
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")
        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
| 5 | 1 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
# NOTE(review): the four bindings below were all mangled to the same name,
# shadowing each other; restored to distinct names used by the helpers below.
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    """True while ``line`` still belongs to the block opened at ``indent``:
    it is indented at least as deep, is (near-)blank, or is the closing
    parenthesis of a multi-line signature.

    Restored: the def had duplicate mangled parameters (a SyntaxError) and the
    name the caller below uses (``_should_continue``).
    """
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def UpperCAmelCase_ ( __snake_case ) -> str:
    """Locate an object inside the diffusers source tree and return its code.

    Given a dotted path such as ``models.attention.Attention``, walks the
    package directories under ``DIFFUSERS_PATH``, opens the module file and
    returns the full text of the named class/function definition.

    NOTE(review): identifier obfuscation collapsed every distinct local into
    ``_lowercase``; the names ``object_name``, ``parts``, ``i``, ``module``,
    ``lines``, ``line_index``, ``indent`` and ``start_index`` used below are
    unresolved, and ``_should_continue`` is not defined under that name in
    this file. Restore the original bindings before running.
    """
    _lowercase =object_name.split('''.''' )
    _lowercase =0
    # First let's find the module where our object lives.
    _lowercase =parts[i]
    while i < len(__snake_case ) and not os.path.isfile(os.path.join(__snake_case , F"{module}.py" ) ):
        i += 1
        if i < len(__snake_case ):
            _lowercase =os.path.join(__snake_case , parts[i] )
    if i >= len(__snake_case ):
        raise ValueError(F"`object_name` should begin with the name of a module of diffusers but got {object_name}." )
    with open(os.path.join(__snake_case , F"{module}.py" ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        _lowercase =f.readlines()
    # Now let's find the class / func in the code!
    _lowercase =''''''
    _lowercase =0
    # Descend one attribute at a time, expecting one extra indent level each.
    for name in parts[i + 1 :]:
        while (
            line_index < len(__snake_case ) and re.search(rF"^{indent}(class|def)\s+{name}(\(|\:)" , lines[line_index] ) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(__snake_case ):
        raise ValueError(F" {object_name} does not match any function or class in {module}." )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    _lowercase =line_index
    while line_index < len(__snake_case ) and _should_continue(lines[line_index] , __snake_case ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    _lowercase =lines[start_index:line_index]
    return "".join(__snake_case )
# Matches a `# Copied from diffusers.<module>.<object>` marker; group 1 is the
# indentation, group 2 the dotted object path, group 3 an optional
# `with old->new` replacement clause.
UpperCAmelCase__ = re.compile(R'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
# Matches one `old->new` replacement (plus an optional trailing option such as
# `all-casing`).
UpperCAmelCase__ = re.compile(R'''^\s*(\S+)->(\S+)(\s+.*|$)''')
# Matches `<FILL ...>` placeholders left in copied code.
UpperCAmelCase__ = re.compile(R'''<FILL\s+[^>]*>''')
def UpperCAmelCase_ ( code ) -> str:
    """Return the leading whitespace of the first non-empty line of ``code``.

    Returns the empty string when ``code`` contains only empty lines.

    Fix over the previous revision: the locals ``lines`` and ``idx`` had been
    collapsed into a single placeholder, so the body referenced undefined
    names.
    """
    lines = code.split('''\n''' )
    idx = 0
    # Skip completely empty leading lines.
    while idx < len(lines ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines ):
        return re.search(r'''^(\s*)\S''' , lines[idx] ).groups()[0]
    return ""
def UpperCAmelCase_ ( __snake_case ) -> str:
    """Reformat a code snippet with black (and restyle its docstrings).

    Indented snippets are temporarily wrapped in a dummy ``class Bla:`` so
    black accepts them, then unwrapped before returning.

    NOTE(review): locals were collapsed by obfuscation — ``has_indent``,
    ``code``, ``get_indent``, ``result`` and the second ``style_docstrings``
    result are unresolved names. ``black.TargetVersion.PYaa`` looks like a
    mangled ``PY37``-style constant — confirm against the original script.
    """
    _lowercase =len(get_indent(__snake_case ) ) > 0
    if has_indent:
        _lowercase =F"class Bla:\n{code}"
    _lowercase =black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=__snake_case )
    _lowercase =black.format_str(__snake_case , mode=__snake_case )
    _lowercase , _lowercase =style_docstrings_in_code(__snake_case )
    return result[len('''class Bla:\n''' ) :] if has_indent else result
def UpperCAmelCase_ ( __snake_case , __snake_case=False ) -> Optional[int]:
    """Check that all `# Copied from ...` blocks in a file match their source.

    Scans the file for copy markers, re-extracts the referenced code, applies
    any `with old->new` replacement clauses, and compares. When ``overwrite``
    is True the stale copies are rewritten in place. Returns the list of
    ``[object_name, start_index]`` diffs found.

    NOTE(review): obfuscation collapsed all locals into ``_lowercase`` —
    ``lines``, ``diffs``, ``line_index``, ``search``, ``object_name``,
    ``replace_pattern``, ``theoretical_code``, ``indent``, ``start_index``,
    ``should_continue``, ``observed_code``, ``patterns``, ``option``,
    ``obja`` and the helpers ``find_code_in_diffusers``/``get_indent``/
    ``_should_continue``/``blackify`` are unresolved names here. The string
    "rewriting (unknown)." also lost the filename interpolation.
    """
    with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        _lowercase =f.readlines()
    _lowercase =[]
    _lowercase =0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(__snake_case ):
        _lowercase =_re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        _lowercase , _lowercase , _lowercase =search.groups()
        _lowercase =find_code_in_diffusers(__snake_case )
        _lowercase =get_indent(__snake_case )
        _lowercase =line_index + 1 if indent == theoretical_indent else line_index + 2
        _lowercase =theoretical_indent
        _lowercase =start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        _lowercase =True
        while line_index < len(__snake_case ) and should_continue:
            line_index += 1
            if line_index >= len(__snake_case ):
                break
            _lowercase =lines[line_index]
            _lowercase =_should_continue(__snake_case , __snake_case ) and re.search(F"^{indent}# End copy" , __snake_case ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1
        _lowercase =lines[start_index:line_index]
        _lowercase =''''''.join(__snake_case )
        # Remove any nested `Copied from` comments to avoid circular copies
        _lowercase =[line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(__snake_case ) is None]
        _lowercase ='''\n'''.join(__snake_case )
        # Before comparing, use the `replace_pattern` on the original code.
        if len(__snake_case ) > 0:
            _lowercase =replace_pattern.replace('''with''' , '''''' ).split(''',''' )
            _lowercase =[_re_replace_pattern.search(__snake_case ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                _lowercase , _lowercase , _lowercase =pattern.groups()
                _lowercase =re.sub(__snake_case , __snake_case , __snake_case )
                if option.strip() == "all-casing":
                    # Apply the replacement in lower- and upper-case too.
                    _lowercase =re.sub(obja.lower() , obja.lower() , __snake_case )
                    _lowercase =re.sub(obja.upper() , obja.upper() , __snake_case )
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            _lowercase =blackify(lines[start_index - 1] + theoretical_code )
            _lowercase =theoretical_code[len(lines[start_index - 1] ) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                _lowercase =lines[:start_index] + [theoretical_code] + lines[line_index:]
                _lowercase =start_index + 1
    if overwrite and len(__snake_case ) > 0:
        # Warn the user a file has been modified.
        print(F"Detected changes, rewriting (unknown)." )
        with open(__snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
            f.writelines(__snake_case )
    return diffs
def UpperCAmelCase_ ( __snake_case = False ) -> Tuple:
    """Run the copy-consistency check over every .py file under DIFFUSERS_PATH.

    Raises an ``Exception`` listing all inconsistencies unless ``overwrite``
    is True (in which case files are fixed in place).

    NOTE(review): ``all_files``, ``diffs``, ``is_copy_consistent``,
    ``new_diffs``, ``overwrite`` and ``diff`` are unresolved names (locals
    collapsed into ``_lowercase`` by obfuscation); the f-string also lost the
    per-file name, now reading "(unknown)".
    """
    _lowercase =glob.glob(os.path.join(__snake_case , '''**/*.py''' ) , recursive=__snake_case )
    _lowercase =[]
    for filename in all_files:
        _lowercase =is_copy_consistent(__snake_case , __snake_case )
        diffs += [F"- (unknown): copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(__snake_case ) > 0:
        _lowercase ='''\n'''.join(__snake_case )
        raise Exception(
            '''Found the following copy inconsistencies:\n'''
            + diff
            + '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' )
# CLI entry point for the copy-consistency checker.
# NOTE(review): `parser`, `args` and `check_copies` are unresolved — the
# parser/args are bound to `UpperCAmelCase__` instead; restore the intended
# identifiers before running.
if __name__ == "__main__":
    UpperCAmelCase__ = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    UpperCAmelCase__ = parser.parse_args()
    check_copies(args.fix_and_overwrite)
| 5 |
UpperCAmelCase__ = 8.31_44_62 # Unit - J mol-1 K-1
def UpperCAmelCase_ ( moles , kelvin , volume ) -> float:
    """Return the pressure of an ideal gas from ``PV = nRT``.

    :param moles: amount of substance ``n`` in mol (must be >= 0)
    :param kelvin: absolute temperature ``T`` in K (must be >= 0)
    :param volume: volume ``V`` in m^3 (must be > 0)
    :return: pressure in Pa
    :raises ValueError: for negative amount/temperature or non-positive volume

    Fixes over the previous revision: the three parameters all carried the
    same placeholder name (a SyntaxError), the body referenced undefined
    names, and a zero volume fell through to a ZeroDivisionError.
    """
    gas_constant = 8.314462  # universal gas constant R, J mol^-1 K^-1
    if moles < 0 or kelvin < 0 or volume <= 0:
        raise ValueError('''Invalid inputs. Enter positive value.''' )
    return moles * kelvin * gas_constant / volume
def UpperCAmelCase_ ( moles , kelvin , pressure ) -> float:
    """Return the volume of an ideal gas from ``PV = nRT``.

    :param moles: amount of substance ``n`` in mol (must be >= 0)
    :param kelvin: absolute temperature ``T`` in K (must be >= 0)
    :param pressure: pressure ``P`` in Pa (must be > 0)
    :return: volume in m^3
    :raises ValueError: for negative amount/temperature or non-positive pressure

    Fixes over the previous revision: the three parameters all carried the
    same placeholder name (a SyntaxError), the body referenced undefined
    names, and a zero pressure fell through to a ZeroDivisionError.
    """
    gas_constant = 8.314462  # universal gas constant R, J mol^-1 K^-1
    if moles < 0 or kelvin < 0 or pressure <= 0:
        raise ValueError('''Invalid inputs. Enter positive value.''' )
    return moles * kelvin * gas_constant / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
| 5 | 1 |
def UpperCAmelCase_ ( grid ) -> int:
    """Return the minimal path sum from the top-left to the bottom-right of
    ``grid``, moving only right or down.

    ``grid`` is mutated in place into the table of accumulated costs.

    :raises TypeError: if ``grid`` or its first row is empty

    Fixes over the previous revision: locals collapsed into a single
    placeholder meant the row results were never written back, and the
    helper was called through the unresolved name ``fill_row``; the helper
    is now a private inner function so the block is self-contained.
    """
    if not grid or not grid[0]:
        raise TypeError('''The grid does not contain the appropriate information''' )

    def _fill_row(current_row , row_above ):
        # Fold the accumulated costs of `row_above` into `current_row`.
        current_row[0] += row_above[0]
        for cell_n in range(1 , len(current_row ) ):
            current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
        return current_row

    # The first row is only reachable from the left: prefix-sum it.
    for cell_n in range(1 , len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1 , len(grid ) ):
        row_above = _fill_row(grid[row_n] , row_above )
    return grid[-1][-1]
def UpperCAmelCase_ ( current_row , row_above ) -> list:
    """Accumulate minimum path costs into ``current_row`` (mutated in place).

    ``row_above`` must already hold accumulated costs; each cell of
    ``current_row`` becomes its own cost plus the cheaper of the cell to its
    left or the cell directly above. The mutated row is also returned.

    Fix over the previous revision: both parameters carried the same
    placeholder name, which is a SyntaxError in Python.
    """
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row ) ):
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 |
from __future__ import annotations
from collections.abc import Callable
UpperCAmelCase__ = list[list[float | int]]
def UpperCAmelCase_ ( matrix , vector ) -> "Matrix":
    """Solve the square linear system ``matrix @ x = vector`` by Gaussian
    elimination with partial pivoting.

    :param matrix: an ``n x n`` coefficient matrix
    :param vector: an ``n x 1`` column vector of right-hand sides
    :return: the ``n x 1`` solution vector, entries rounded to 10 decimals

    Fixes over the previous revision: the two parameters shared one
    placeholder name (a SyntaxError) and all distinct locals had been
    collapsed into ``_lowercase``, leaving ``augmented``, ``pivot_row`` and
    ``ratio`` undefined.
    """
    size = len(matrix )
    # Build the augmented matrix [matrix | vector].
    augmented = [[0 for _ in range(size + 1 )] for _ in range(size )]
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    # Forward elimination with partial pivoting.
    while row < size and col < size:
        # Pick the row (at or below `row`) with the largest magnitude in `col`.
        pivot_row = max((abs(augmented[rowa][col] ), rowa) for rowa in range(row , size ) )[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for rowa in range(row + 1 , size ):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1 , size + 1 ):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1
    # Back substitution: clear everything above each pivot.
    for col in range(1 , size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col , size + 1 ):
                augmented[row][cola] -= augmented[col][cola] * ratio
    # Round to get rid of numbers like 2.000000000000004.
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
    ]
def UpperCAmelCase_ ( __snake_case ) -> Callable[[int], int]:
    """Build the Lagrange-style interpolating polynomial through the points
    ``(1, data[0]), (2, data[1]), ...`` by solving a Vandermonde system, and
    return it as a callable.

    NOTE(review): obfuscation collapsed the locals — ``size``, the matrix,
    the vector and ``coeffs`` were all assigned to ``_lowercase``, the four
    ``_lowercase =42`` lines look like mangled type annotations, and
    ``solve``/``size``/``coeffs``/``var`` are unresolved names. Restore the
    original bindings (the solver is defined above under an obfuscated name)
    before running.
    """
    _lowercase =len(__snake_case )
    _lowercase =[[0 for _ in range(__snake_case )] for _ in range(__snake_case )]
    _lowercase =[[0] for _ in range(__snake_case )]
    _lowercase =42
    _lowercase =42
    _lowercase =42
    _lowercase =42
    for x_val, y_val in enumerate(__snake_case ):
        for col in range(__snake_case ):
            # Row for x = x_val + 1: powers x^(size-1), ..., x^0.
            _lowercase =(x_val + 1) ** (size - col - 1)
            _lowercase =y_val
    _lowercase =solve(__snake_case , __snake_case )
    def interpolated_func(__snake_case ) -> int:
        # Evaluate the fitted polynomial at `var`.
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(__snake_case ) )
    return interpolated_func
def UpperCAmelCase_ ( variable ) -> int:
    """Evaluate the Project Euler 101 generating polynomial
    ``u(n) = 1 - n + n^2 - n^3 + ... + n^10``.

    Fix over the previous revision: the parameter was the placeholder
    ``__snake_case`` while the body referenced the undefined name
    ``variable``.
    """
    # Term k carries sign (-1)^k, i.e. it equals (-variable)^k.
    return sum((-variable) ** power for power in range(11 ) )
def UpperCAmelCase_ ( __snake_case = question_function , __snake_case = 10 ) -> int:
    """Project Euler 101: sum the first incorrect term (FIT) of each optimum
    polynomial fitted to prefixes of the sequence ``func(1..order)``.

    NOTE(review): the default ``question_function`` and the names ``func``,
    ``order``, ``interpolate``, ``data_points``, ``polynomials``, ``ret``,
    ``poly`` and ``x_val`` are unresolved here — obfuscation renamed the
    sibling definitions to ``UpperCAmelCase_`` and collapsed the locals into
    ``_lowercase``. Restore the original identifiers before running.
    """
    _lowercase =[func(__snake_case ) for x_val in range(1 , order + 1 )]
    _lowercase =[
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    _lowercase =0
    _lowercase =42
    _lowercase =42
    for poly in polynomials:
        # Walk forward until the fit first disagrees with the true sequence.
        _lowercase =1
        while func(__snake_case ) == poly(__snake_case ):
            x_val += 1
        ret += poly(__snake_case )
    return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
| 5 | 1 |
import argparse
import struct
import unittest
class lowerCamelCase__ :
    """Pure-Python SHA-256 (FIPS 180-4).

    Hash raw ``bytes`` and expose the digest as the hex string attribute
    ``hash``.

    Fixes over the previous revision: the three helper methods were all
    named ``__A`` (later definitions replaced earlier ones, so
    ``self.preprocessing`` / ``self.final_hash`` / ``self.ror`` did not
    exist), ``ror`` declared two parameters with the same name (a
    SyntaxError), and ``preprocessing`` returned undefined names.
    """

    def __init__(self , data ) -> None:
        # Raw message bytes to hash.
        self.data = data
        # Initial hash values: first 32 bits of the fractional parts of the
        # square roots of the first 8 primes (FIPS 180-4 section 5.3.3).
        self.hashes = [
            0x6A09E667,
            0xBB67AE85,
            0x3C6EF372,
            0xA54FF53A,
            0x510E527F,
            0x9B05688C,
            0x1F83D9AB,
            0x5BE0CD19,
        ]
        # Round constants: first 32 bits of the fractional parts of the cube
        # roots of the first 64 primes (FIPS 180-4 section 4.2.2).
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
            0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
            0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
            0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
            0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
            0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
            0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
            0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
            0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]
        self.preprocessed_data = self.preprocessing(self.data )
        self.final_hash()

    @staticmethod
    def preprocessing(data ) -> bytes:
        """Pad the message: 0x80, zeros, then the bit length as a big-endian
        64-bit integer, so the total length is a multiple of 64 bytes."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data ) + 8) % 64) )
        big_endian_integer = struct.pack(">Q" , (len(data ) * 8) )
        return data + padding + big_endian_integer

    def final_hash(self ) -> None:
        """Run the compression function over every 64-byte block and store
        the hex digest in ``self.hash``."""
        # Convert into blocks of 64 bytes.
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 , len(self.preprocessed_data ) , 64 )
        ]
        for block in self.blocks:
            # Message schedule: 16 words from the block plus 48 expanded ones.
            words = list(struct.unpack(">16L" , block ) )
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0 , 64 ):
                if index > 15:
                    # Expand the message schedule (sigma0 / sigma1).
                    sa = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    sb = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + sa + words[index - 7] + sb
                    ) % 0x100000000
                # Compression round.
                s1 = self.ror(e , 6 ) ^ self.ror(e , 11 ) ^ self.ror(e , 25 )
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                tempa = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a , 2 ) ^ self.ror(a , 13 ) ^ self.ror(a , 22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                tempb = (s0 + maj) % 0x100000000
                a, b, c, d, e, f, g, h = (
                    (tempa + tempb) % 0x100000000,
                    a,
                    b,
                    c,
                    (d + tempa) % 0x100000000,
                    e,
                    f,
                    g,
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Fold this block's result into the running hash values.
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = "".join([hex(value )[2:].zfill(8 ) for value in self.hashes] )

    def ror(self , value , rotations ) -> int:
        """Rotate a 32-bit ``value`` right by ``rotations`` bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


# Backward/forward-compatible alias: the unit test and CLI below refer to the
# class by this name.
SHAaaa = lowerCamelCase__
class lowerCamelCase__ ( unittest.TestCase):
    """Sanity-check the pure-Python SHA-256 against hashlib.

    NOTE(review): this second ``lowerCamelCase__`` shadows the hash class
    defined above; ``UpperCAmelCase`` is an unresolved name inside the test
    and ``hashlib.shaaaa`` looks like a digit-mangled ``hashlib.sha256`` —
    restore the intended identifiers.
    """
    def __A (self ) -> None:
        import hashlib

        _lowercase =bytes('''Test String''' , '''utf-8''' )
        self.assertEqual(SHAaaa(UpperCAmelCase ).hash , hashlib.shaaaa(UpperCAmelCase ).hexdigest() )
def UpperCAmelCase_ ( ) -> None:
    """CLI entry point: hash a string (``-s``) or the contents of a file
    (``-f``) with the pure-Python SHA-256 and print the hex digest.

    NOTE(review): obfuscation collapsed the locals — ``parser``, ``args``,
    ``__snake_case`` (the string to encode) and the bytes passed to
    ``SHAaaa`` are unresolved names here; restore the original bindings
    before running.
    """
    # Run the module's doctests first, mirroring the original script.
    import doctest

    doctest.testmod()
    _lowercase =argparse.ArgumentParser()
    parser.add_argument(
        '''-s''' , '''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
    parser.add_argument(
        '''-f''' , '''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
    _lowercase =parser.parse_args()
    _lowercase =args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , '''rb''' ) as f:
            _lowercase =f.read()
    else:
        _lowercase =bytes(__snake_case , '''utf-8''' )
    print(SHAaaa(__snake_case ).hash )
| 5 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 | 1 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase__ :
    """Config/input factory and shape checks for the YOLOS model tests.

    NOTE(review): this class is obfuscation-broken — ``__init__`` declares
    many parameters all named ``UpperCAmelCase`` (a SyntaxError), every
    attribute assignment is bound to the throwaway ``_lowercase`` while the
    bodies read the intended names (``parent``, ``batch_size``, ...), and
    all four helper methods are named ``__A`` so later definitions replace
    earlier ones. Restore the original identifiers before running.
    """
    def __init__(self , UpperCAmelCase , UpperCAmelCase=1_3 , UpperCAmelCase=[3_0, 3_0] , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=3_2 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=3_7 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=1_0 , UpperCAmelCase=0.02 , UpperCAmelCase=3 , UpperCAmelCase=None , UpperCAmelCase=8 , UpperCAmelCase=1_0 , ) -> Tuple:
        _lowercase =parent
        _lowercase =batch_size
        _lowercase =image_size
        _lowercase =patch_size
        _lowercase =num_channels
        _lowercase =is_training
        _lowercase =use_labels
        _lowercase =hidden_size
        _lowercase =num_hidden_layers
        _lowercase =num_attention_heads
        _lowercase =intermediate_size
        _lowercase =hidden_act
        _lowercase =hidden_dropout_prob
        _lowercase =attention_probs_dropout_prob
        _lowercase =type_sequence_label_size
        _lowercase =initializer_range
        _lowercase =num_labels
        _lowercase =scope
        _lowercase =n_targets
        _lowercase =num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        _lowercase =(image_size[1] // patch_size) * (image_size[0] // patch_size)
        _lowercase =num_patches + 1 + self.num_detection_tokens
    # Originally `prepare_config_and_inputs`: random pixel values plus
    # per-example detection labels when `use_labels` is set.
    def __A (self ) -> str:
        _lowercase =floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
        _lowercase =None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            _lowercase =[]
            for i in range(self.batch_size ):
                _lowercase ={}
                _lowercase =torch.randint(
                    high=self.num_labels , size=(self.n_targets,) , device=UpperCAmelCase )
                _lowercase =torch.rand(self.n_targets , 4 , device=UpperCAmelCase )
                labels.append(UpperCAmelCase )
        _lowercase =self.get_config()
        return config, pixel_values, labels
    # Originally `get_config`.
    def __A (self ) -> Dict:
        return YolosConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
    # Originally `create_and_check_model`: checks the last hidden state shape.
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
        _lowercase =YolosModel(config=UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        _lowercase =model(UpperCAmelCase )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
    # Originally `create_and_check_for_object_detection`: checks logits,
    # pred_boxes and (with labels) the loss shape.
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
        _lowercase =YolosForObjectDetection(UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        _lowercase =model(pixel_values=UpperCAmelCase )
        _lowercase =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
        self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
        _lowercase =model(pixel_values=UpperCAmelCase , labels=UpperCAmelCase )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
        self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
    # Originally `prepare_config_and_inputs_for_common`.
    def __A (self ) -> List[Any]:
        _lowercase =self.prepare_config_and_inputs()
        _lowercase , _lowercase , _lowercase =config_and_inputs
        _lowercase ={'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class lowerCamelCase__ ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase):
    """Common model/pipeline tests for YOLOS.

    NOTE(review): obfuscation damage — the class attributes were originally
    distinct (``all_model_classes``, ``pipeline_model_mapping``,
    ``test_pruning``, ...) but are all bound to ``SCREAMING_SNAKE_CASE__``;
    every test method is named ``__A`` so only the last survives; several
    defs repeat the parameter name ``UpperCAmelCase`` (a SyntaxError); and
    bodies read names (``model``, ``inputs_dict``, ``labels``, ...) whose
    assignments were collapsed into ``_lowercase``. Restore the original
    identifiers before running.
    """
    SCREAMING_SNAKE_CASE__ = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE__ = (
        {'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {}
    )
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = False
    # Originally `_prepare_for_class`: adds dummy detection labels for the
    # object-detection head when `return_labels` is requested.
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> Optional[int]:
        _lowercase =super()._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                _lowercase =[]
                for i in range(self.model_tester.batch_size ):
                    _lowercase ={}
                    _lowercase =torch.ones(
                        size=(self.model_tester.n_targets,) , device=UpperCAmelCase , dtype=torch.long )
                    _lowercase =torch.ones(
                        self.model_tester.n_targets , 4 , device=UpperCAmelCase , dtype=torch.float )
                    labels.append(UpperCAmelCase )
                _lowercase =labels
        return inputs_dict
    # Originally `setUp`.
    def __A (self ) -> List[Any]:
        _lowercase =YolosModelTester(self )
        _lowercase =ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=3_7 )
    def __A (self ) -> List[str]:
        self.config_tester.run_common_tests()
    def __A (self ) -> Dict:
        # YOLOS does not use inputs_embeds
        pass
    # Originally `test_model_common_attributes`.
    def __A (self ) -> Any:
        _lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowercase =model_class(UpperCAmelCase )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            _lowercase =model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
    # Originally `test_forward_signature`.
    def __A (self ) -> int:
        _lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowercase =model_class(UpperCAmelCase )
            _lowercase =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowercase =[*signature.parameters.keys()]
            _lowercase =['''pixel_values''']
            self.assertListEqual(arg_names[:1] , UpperCAmelCase )
    def __A (self ) -> Tuple:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase )
    # Originally `test_attention_outputs`.
    def __A (self ) -> Tuple:
        _lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
        _lowercase =True
        # in YOLOS, the seq_len is different
        _lowercase =self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            _lowercase =True
            _lowercase =False
            _lowercase =True
            _lowercase =model_class(UpperCAmelCase )
            model.to(UpperCAmelCase )
            model.eval()
            with torch.no_grad():
                _lowercase =model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
            _lowercase =outputs.attentions
            self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            _lowercase =True
            _lowercase =model_class(UpperCAmelCase )
            model.to(UpperCAmelCase )
            model.eval()
            with torch.no_grad():
                _lowercase =model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
            _lowercase =outputs.attentions
            self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
            _lowercase =len(UpperCAmelCase )
            # Check attention is always last and order is fine
            _lowercase =True
            _lowercase =True
            _lowercase =model_class(UpperCAmelCase )
            model.to(UpperCAmelCase )
            model.eval()
            with torch.no_grad():
                _lowercase =model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
            _lowercase =1
            self.assertEqual(out_len + added_hidden_states , len(UpperCAmelCase ) )
            _lowercase =outputs.attentions
            self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
    # Originally `test_hidden_states_output`.
    def __A (self ) -> Any:
        def check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
            _lowercase =model_class(UpperCAmelCase )
            model.to(UpperCAmelCase )
            model.eval()
            with torch.no_grad():
                _lowercase =model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
            _lowercase =outputs.hidden_states
            _lowercase =getattr(
                self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
            # YOLOS has a different seq_length
            _lowercase =self.model_tester.expected_seq_len
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
        _lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowercase =True
            check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _lowercase =True
            check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
    def __A (self ) -> List[Any]:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*UpperCAmelCase )
    # Originally `test_model_from_pretrained` (network access, hence @slow).
    @slow
    def __A (self ) -> int:
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowercase =YolosModel.from_pretrained(UpperCAmelCase )
            self.assertIsNotNone(UpperCAmelCase )
def UpperCAmelCase_ ( ) -> "Image.Image":
    """Load the COCO sample image (two cats on a couch) used by the
    integration tests below.

    Fix over the previous revision: the opened image was bound to a
    throwaway local while the undefined name ``image`` was returned
    (NameError); the ``List[str]`` annotation was also wrong for a PIL
    image.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
    """Slow integration test against the pretrained ``hustvl/yolos-small``
    checkpoint: checks raw logits/boxes and the post-processed detections.

    NOTE(review): obfuscation damage — both methods are named ``__A``,
    method bodies read names (``model``, ``inputs``, ``outputs``,
    ``results``, ``image``) whose assignments were collapsed into
    ``_lowercase``, and the tensors are moved to the unresolved name
    ``UpperCAmelCase`` (presumably ``torch_device``). Restore the original
    identifiers before running.
    """
    @cached_property
    def __A (self ) -> Optional[int]:
        return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
    @slow
    def __A (self ) -> Optional[int]:
        _lowercase =YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(UpperCAmelCase )
        _lowercase =self.default_image_processor
        _lowercase =prepare_img()
        _lowercase =image_processor(images=UpperCAmelCase , return_tensors='''pt''' ).to(UpperCAmelCase )
        # forward pass
        with torch.no_grad():
            _lowercase =model(inputs.pixel_values )
        # verify outputs
        _lowercase =torch.Size((1, 1_0_0, 9_2) )
        self.assertEqual(outputs.logits.shape , UpperCAmelCase )
        _lowercase =torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=UpperCAmelCase , )
        _lowercase =torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCAmelCase , atol=1e-4 ) )
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCAmelCase , atol=1e-4 ) )
        # verify postprocessing
        _lowercase =image_processor.post_process_object_detection(
            UpperCAmelCase , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
        _lowercase =torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(UpperCAmelCase )
        _lowercase =[7_5, 7_5, 1_7, 6_3, 1_7]
        _lowercase =torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(UpperCAmelCase )
        self.assertEqual(len(results['''scores'''] ) , 5 )
        self.assertTrue(torch.allclose(results['''scores'''] , UpperCAmelCase , atol=1e-4 ) )
        self.assertSequenceEqual(results['''labels'''].tolist() , UpperCAmelCase )
        self.assertTrue(torch.allclose(results['''boxes'''][0, :] , UpperCAmelCase ) )
| 5 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ = {
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ['''EfficientNetImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
'''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientNetForImageClassification''',
'''EfficientNetModel''',
'''EfficientNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 5 | 1 |
UpperCAmelCase__ = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
UpperCAmelCase__ = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def UpperCAmelCase_ ( graph , vert , visited ) -> list[int]:
    """Return the DFS post-order starting at `vert`, marking `visited` in place.

    BUG FIX: the original declared three parameters all named ``__snake_case``
    (a SyntaxError), never set ``visited[vert]`` (the flag was assigned to a
    throwaway local, so cyclic graphs recursed forever), and recursed through
    the undefined name ``topology_sort``.
    """
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += UpperCAmelCase_(graph, neighbour, visited)
    # post-order: a vertex is appended only after all of its descendants
    order.append(vert)
    return order
def UpperCAmelCase_ ( reversed_graph , vert , visited ) -> list[int]:
    """Collect all vertices reachable from `vert` in `reversed_graph` (one SCC).

    BUG FIX: the original had duplicate ``__snake_case`` parameter names (a
    SyntaxError), discarded the ``visited[vert] = True`` mark into a throwaway
    local, and recursed through the undefined name ``find_components``.
    """
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += UpperCAmelCase_(reversed_graph, neighbour, visited)
    return component
def UpperCAmelCase_ ( __snake_case ) -> list[list[int]]:
    """Return the strongly connected components of a directed graph
    (Kosaraju's algorithm).  `__snake_case` maps vertex -> neighbour list;
    vertices must be 0..n-1.

    BUG FIX: the original discarded every local (`visited`, `order`,
    `reversed_graph`, ...) into a throwaway name and called the sibling
    helpers through names that do not exist in this module, so the helpers
    are nested here to make the function self-contained.
    """
    graph = __snake_case

    def _topology_sort(g, vert, visited):
        # DFS post-order of the original graph.
        visited[vert] = True
        order = []
        for neighbour in g[vert]:
            if not visited[neighbour]:
                order += _topology_sort(g, neighbour, visited)
        order.append(vert)
        return order

    def _find_components(g, vert, visited):
        # One SCC: everything reachable in the reversed graph.
        visited[vert] = True
        component = [vert]
        for neighbour in g[vert]:
            if not visited[neighbour]:
                component += _find_components(g, neighbour, visited)
        return component

    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += _topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]
    # Process vertices in reverse finishing order on the reversed graph.
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = _find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
| 5 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import structure: maps submodule name -> public names it defines.
# BUG FIX: the original bound this dict to a throwaway name while passing
# `_import_structure` to _LazyModule, raising NameError at import time, and
# never added the torch-only names to the dict.
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so torch loads only on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 5 | 1 |
# Lint as: python3
import itertools
import os
import re
UpperCAmelCase__ = re.compile(R'''([A-Z]+)([A-Z][a-z])''')
UpperCAmelCase__ = re.compile(R'''([a-z\d])([A-Z])''')
UpperCAmelCase__ = re.compile(R'''(?<!_)_(?!_)''')
UpperCAmelCase__ = re.compile(R'''(_{2,})''')
UpperCAmelCase__ = R'''^\w+(\.\w+)*$'''
UpperCAmelCase__ = R'''<>:/\|?*'''
def UpperCAmelCase_ ( __snake_case ) -> str:
    """Convert a CamelCase name to snake_case.

    BUG FIX: the original discarded each substitution result into a throwaway
    local and returned the undefined name ``name`` (NameError); it also used a
    return annotation (`Any`) that is not imported.  The patterns are compiled
    inline so the function does not depend on module constants that the
    obfuscation clobbered.
    """
    # "ABCDef" -> "ABC_Def", then "abcDef" -> "abc_Def", then lower-case.
    name = re.sub(r"([A-Z]+)([A-Z][a-z])", r"\1_\2", __snake_case)
    name = re.sub(r"([a-z\d])([A-Z])", r"\1_\2", name)
    return name.lower()
def UpperCAmelCase_ ( __snake_case ) -> str:
    """Convert a snake_case name to CamelCase, preserving runs of '__'.

    BUG FIX: the original discarded the split results into a throwaway local
    and then iterated the undefined name ``name`` (NameError).  The patterns
    are compiled inline because the module-level constants were clobbered.
    """
    # Split on single underscores only; then split each piece on runs of 2+
    # underscores (the capture group keeps the run itself in the output).
    parts = re.split(r"(?<!_)_(?!_)", __snake_case)
    parts = [re.split(r"(_{2,})", n) for n in parts]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(parts) if n != "")
def UpperCAmelCase_ ( __snake_case ) -> str:
    """Validate that `__snake_case` is a bare dataset name (not a path) and
    return its snake_case form.

    Raises:
        ValueError: if the argument contains path separators.

    BUG FIX: the error message interpolated the undefined name ``name``, so
    the failure path itself raised NameError instead of ValueError.
    """
    if os.path.basename(__snake_case) != __snake_case:
        raise ValueError(F"Should be a dataset name, not a path: {__snake_case}")
    # NOTE(review): `camelcase_to_snakecase` is the canonical helper defined
    # earlier in this module (under a mangled name in this copy).
    return camelcase_to_snakecase(__snake_case)
def UpperCAmelCase_ ( name , split ) -> str:
    """Return the ``<snake_case_name>-<split>`` filename prefix.

    Raises:
        ValueError: if `name` looks like a path or `split` does not match
            the `_split_re` pattern.

    BUG FIX: the original declared both parameters as ``__snake_case``
    (a SyntaxError) and its messages/checks read the undefined names
    ``name`` and ``split``.
    """
    if os.path.basename(name) != name:
        raise ValueError(F"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(F"Split name should match '{_split_re}'' but got '{split}'.")
    return F"{filename_prefix_for_name(name)}-{split}"
def UpperCAmelCase_ ( path , dataset_name , split , filetype_suffix=None ) -> str:
    """Return a glob pattern matching every shard file of `split` under `path`.

    BUG FIX: the original declared three parameters all named ``__snake_case``
    (a SyntaxError) and discarded `prefix`/`filepath` into throwaway locals,
    so the return statement read undefined names.
    """
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += F".{filetype_suffix}"
    filepath = os.path.join(path, prefix)
    return F"{filepath}*"
def UpperCAmelCase_ ( path , dataset_name , split , filetype_suffix=None , shard_lengths=None ) -> list[str]:
    """Return the concrete shard filenames for `split` under `path`.

    With `shard_lengths`, one ``-NNNNN-of-NNNNN``-suffixed name per shard is
    produced; otherwise a single-element list.

    BUG FIX: the original declared three positional parameters all named
    ``__snake_case`` (a SyntaxError) and discarded every local into a
    throwaway name, so `prefix`, `num_shards`, `filenames` and `filename`
    were all undefined where read.
    """
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [F"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + F".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F".{filetype_suffix}"
        return [filename]
| 5 |
def UpperCAmelCase_ ( a , b ) -> int:
    """Compute ``a ** b`` for a non-negative integer exponent by squaring.

    BUG FIX: the original declared both parameters as ``__snake_case`` (a
    SyntaxError) and evaluated the half-power recursion twice per level,
    degrading the intended O(log b) scheme to O(b) calls.
    """
    if b == 0:
        return 1
    # Evaluate the half power exactly once, then square it.
    half = UpperCAmelCase_(a, int(b / 2))
    if (b % 2) == 0:
        return half * half
    return a * half * half
def UpperCAmelCase_ ( a , b ) -> float:
    """Return ``a ** b`` for any integer exponent; a negative `b` yields
    ``1 / a**|b|`` as a float.

    BUG FIX: the original declared both parameters as ``__snake_case`` (a
    SyntaxError) and forwarded the *negative* exponent straight to the
    non-negative helper, relying on int()-truncation quirks to terminate.
    The helper is nested so the function is self-contained.
    """

    def _actual_power(base, exp):
        # Iterative exponentiation by squaring; exp >= 0.
        result = 1
        while exp:
            if exp & 1:
                result *= base
            base *= base
            exp >>= 1
        return result

    if b < 0:
        return 1 / _actual_power(a, -b)
    return _actual_power(a, b)


if __name__ == "__main__":
    # BUG FIX: the script called the undefined name `power`; the function above
    # is bound to the mangled name `UpperCAmelCase_`.
    print(UpperCAmelCase_(-2, -3))
| 5 | 1 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
# BUG FIX: the original bound all three paths to the same throwaway name; the
# code below reads TRANSFORMERS_PATH and PATH_TO_DOCS, which raised NameError.
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def UpperCAmelCase_ ( filename , start_prompt , end_prompt ) -> tuple[str, int, int, list[str]]:
    """Find the block of text in `filename` between `start_prompt` and
    `end_prompt`, skipping surrounding blank lines.

    Returns ``(text, start_index, end_index, lines)`` where
    ``text == "".join(lines[start_index:end_index])``.
    Raises IndexError if either prompt is absent (pre-existing behaviour).

    BUG FIX: the original declared three parameters all named ``__snake_case``
    (a SyntaxError) and discarded `lines`/`start_index`/`end_index` into
    throwaway locals before reading them.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim blank lines just inside the prompts.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
# BUG FIX: the original bound every constant below to the same throwaway name,
# so `_re_tf_models`, `_re_flax_models`, `_re_pt_models` and
# `transformers_module` (all read later) raised NameError.
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def UpperCAmelCase_ ( __snake_case ) -> list[str]:
    """Split a CamelCase identifier into its constituent words.

    BUG FIX: the original discarded the `finditer` result into a throwaway
    local and iterated the undefined name ``matches`` (NameError).
    """
    # Lazily match up to each word boundary: lower->Upper or UPPER->UpperLower.
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", __snake_case)
    return [m.group(0) for m in matches]
def UpperCAmelCase_ ( text , width ) -> str:
    """Center `text` in a field of `width` characters; the check-mark emoji
    render two columns wide, so they count as length 2.

    BUG FIX: the original declared both parameters as ``__snake_case`` (a
    SyntaxError) and discarded `text_length`/`left_indent`/`right_indent`
    into throwaway locals before reading them.
    """
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def UpperCAmelCase_ ( ) -> str:
    """Build the markdown table of models vs. slow/fast tokenizer and
    PyTorch/TensorFlow/Flax support by introspecting `transformers_module`.

    BUG FIX: the original discarded every local into a throwaway name (so
    `config_maping_names`, the lookup dicts, `widths`, `table`, ... were all
    undefined where read), referenced ``__snake_case`` inside a no-argument
    function, and never stored the ``True`` flag into the lookup dict.
    """
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def UpperCAmelCase_ ( __snake_case=False ) -> None:
    """Check that the model table in `docs/source/en/index.md` is up to date;
    rewrite it when `__snake_case` (overwrite) is True, otherwise raise.

    BUG FIX: the original passed the boolean parameter where the docs path
    belongs and discarded the tuple of results into a throwaway local, so
    `current_table`/`start_index`/`end_index`/`lines`/`new_table` were all
    undefined where read.
    """
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if __snake_case:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    # BUG FIX: the checker above is bound to the mangled name `UpperCAmelCase_`;
    # `check_model_table` does not exist in this module.
    UpperCAmelCase_(args.fix_and_overwrite)
| 5 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowerCamelCase__ ( nn.Module):
    """Pair of Transformer2D blocks whose outputs are mixed, each conditioned on
    its own slice of the encoder hidden states (dual text-encoder setup).

    BUG FIX: the original `__init__` declared every parameter as
    ``UpperCAmelCase`` (a SyntaxError) and discarded all attribute assignments
    into throwaway locals; `__A` (forward) likewise read the undefined names
    `input_states`/`encoded_state`/`output_states`.
    """

    def __init__(self, num_attention_heads=1_6, attention_head_dim=8_8, in_channels=None, num_layers=1, dropout=0.0, norm_num_groups=3_2, cross_attention_dim=None, attention_bias=False, sample_size=None, num_vector_embeds=None, activation_fn="geglu", num_embeds_ada_norm=None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, )
                for _ in range(2)
            ] )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [7_7, 2_5_7]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def __A (self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict=True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return TransformeraDModelOutput(sample=output_states)
| 5 | 1 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase__ :
    """Builds small ConvNextV2 configs and inputs for the tests below.

    BUG FIX: the original `__init__` declared every parameter as
    ``UpperCAmelCase`` (a SyntaxError), discarded all attribute assignments
    and locals into throwaway names, and defined every method as ``__A`` so
    later definitions shadowed earlier ones while sibling code calls the
    real names (`prepare_config_and_inputs`, `get_config`, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=1_3,
        image_size=3_2,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[1_0, 2_0, 3_0, 4_0],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=3_7,
        hidden_act="gelu",
        num_labels=1_0,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        # NOTE: the mutable list defaults are kept for interface compatibility;
        # the tester never mutates them.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class lowerCamelCase__ ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase):
    """ConvNextV2 model-level tests (config, forward, hidden states, training).

    BUG FIX: the original bound every class attribute to the same mangled name
    `SCREAMING_SNAKE_CASE__` (so only the last survived), named every method
    ``__A`` (so unittest discovered none and later defs shadowed earlier
    ones), and discarded locals like `config`/`inputs_dict`/`model` into
    throwaway names before reading them.
    """

    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=3_7)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def UpperCAmelCase_ ( ) -> "Image.Image":
    """Load the standard COCO cats fixture image used by the integration tests.

    BUG FIX: the original discarded the opened image into a throwaway local
    and returned the undefined name ``image``.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
    """Slow integration test running the pretrained ConvNextV2-tiny classifier.

    BUG FIX: the original named both members ``__A`` (so the test method
    shadowed the cached property that its own body reads via
    ``self.default_image_processor``) and discarded every local
    (`model`, `inputs`, `outputs`, ...) into throwaway names.
    """

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 5 |
import heapq as hq
import math
from collections.abc import Iterator
class lowerCamelCase__ :
    """A graph vertex for Prim's algorithm: holds its id, current key
    (best known edge weight), predecessor `pi`, neighbours and edge weights.

    BUG FIX: the original discarded every attribute assignment into a
    throwaway local (so `self.id`/`self.key`/... never existed), compared
    against the undefined name ``other`` in ``__lt__``, and named the
    mutators ``__A`` while sibling code calls `add_neighbor`/`add_edge`.
    """

    def __init__(self, UpperCAmelCase) -> None:
        self.id = str(UpperCAmelCase)
        self.key = None  # best edge weight seen so far (set by prim)
        self.pi = None  # predecessor vertex in the MST
        self.neighbors = []
        self.edges = {}  # {vertex id: edge weight}

    def __lt__(self, other) -> bool:
        # Ordering by key lets min()/heapq pick the closest vertex.
        return self.key < other.key

    def __repr__(self) -> str:
        return self.id

    def add_neighbor(self, vertex) -> None:
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight) -> None:
        self.edges[vertex.id] = weight
def UpperCAmelCase_ ( graph , a , b , weight ) -> None:
    """Add an undirected weighted edge between 1-indexed vertices `a` and `b`.

    BUG FIX: the original declared all four parameters as ``__snake_case``,
    which is a SyntaxError (duplicate argument names).
    """
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], weight)
    graph[b - 1].add_edge(graph[a - 1], weight)
def UpperCAmelCase_ ( graph , root ) -> list:
    """Prim's MST, O(V^2) list-scan version.

    Returns ``(child, parent)`` pairs of 1-indexed vertex ids for every
    vertex except the root.

    BUG FIX: the original declared both parameters as ``__snake_case`` (a
    SyntaxError) and discarded every assignment — `u.key`, `u.pi`,
    `root.key`, `v.pi`, `v.key` — into throwaway locals, so no vertex was
    ever relaxed.
    """
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
def UpperCAmelCase_ ( graph , root ) -> Iterator[tuple]:
    """Prim's MST using a binary heap; yields ``(child, parent)`` pairs of
    1-indexed vertex ids for every vertex except the root.

    BUG FIX: the original declared both parameters as ``__snake_case`` (a
    SyntaxError) and discarded every assignment (`u.key`, `u.pi`,
    `root.key`, `v.pi`, `v.key`, the heap list) into throwaway locals.
    """
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                # Keys changed under the heap; restore the invariant.
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def UpperCAmelCase_ ( ) -> None:
    """Placeholder test driver; doctest examples would go here."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 5 | 1 |
from __future__ import annotations
from collections.abc import Callable
UpperCAmelCase__ = list[list[float | int]]
def UpperCAmelCase_ ( matrix , vector ) -> "Matrix":
    """Solve the square linear system ``matrix @ x = vector`` by Gaussian
    elimination with partial pivoting; results are rounded to 10 decimals.

    `vector` is a column, i.e. a list of single-element rows.

    BUG FIX: the original declared both parameters as ``__snake_case`` (a
    SyntaxError) and discarded every local (`size`, `augmented`, `ratio`,
    the pivot swap, ...) into throwaway names before reading them.
    """
    size = len(matrix)
    augmented = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting: pick the row with the largest absolute entry in this column
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def UpperCAmelCase_ ( __snake_case ) -> Callable[[int], int]:
    """Return the minimal-degree polynomial through the points
    ``(1, y1), (2, y2), ...`` given by the sequence `__snake_case`.

    BUG FIX: the original discarded `size`/`matrix`/`vector`/`coeffs` into
    throwaway locals, and the inner function's parameter was lost so its
    body read the undefined name ``var``.
    """
    y_points = __snake_case
    size = len(y_points)
    matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector = [[0] for _ in range(size)]

    # Vandermonde system: row i evaluates the powers of x = i + 1.
    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def UpperCAmelCase_ ( __snake_case ) -> int:
    """The Project Euler 101 generating function
    ``u(n) = 1 - n + n^2 - n^3 + ... + n^10``.

    BUG FIX: the original body read the undefined name ``variable`` instead
    of its (mangled) parameter.
    """
    return (
        1
        - __snake_case
        + __snake_case**2
        - __snake_case**3
        + __snake_case**4
        - __snake_case**5
        + __snake_case**6
        - __snake_case**7
        + __snake_case**8
        - __snake_case**9
        + __snake_case**10
    )
def UpperCAmelCase_ ( func = question_function , order = 10 ) -> int:
    """Sum the first incorrect terms (FITs) of the optimum polynomials fitted
    to the first 1..`order` values of `func` (Project Euler 101).

    BUG FIX: the original declared both parameters as ``__snake_case`` (a
    SyntaxError) and discarded `data_points`/`polynomials`/`ret`/`x_val`
    into throwaway locals before reading them.
    """
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret = 0
    for poly in polynomials:
        # Walk forward until the fitted polynomial first disagrees with func.
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
    # NOTE(review): `solution` is never defined in this module (the function
    # above is bound to a mangled name), so running the script raises NameError.
    print(f'''{solution() = }''')
| 5 |
# flake8: noqa
# Lint as: python3
# BUG FIX: the original assigned the public-API list to a throwaway name
# instead of `__all__`, so `from ... import *` ignored it.
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 5 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import structure: maps submodule name -> public names it defines.
# BUG FIX: the original bound this dict to a throwaway name while passing
# `_import_structure` to _LazyModule, raising NameError at import time, and
# never added the torch-only names to the dict.
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so torch loads only on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 5 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# BUG FIX: the original assigned both the logger and the archive map to the
# same throwaway name, clobbering the logger.
WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowerCamelCase__ ( lowerCAmelCase):
    # Configuration class for WavLM models (mirrors `transformers.WavLMConfig`).
    #
    # NOTE(review): this block is machine-mangled and cannot run as-is:
    #   * every `__init__` parameter is named `UpperCAmelCase` — duplicate
    #     parameter names are a SyntaxError in Python;
    #   * the body writes every value to the single local `_lowercase` while
    #     reading the intended names (`hidden_size`, `feat_extract_norm`, ...),
    #     which are undefined here.
    # Restore the parameter/attribute names from the upstream source before use.

    # Upstream this is the `model_type` class attribute.
    SCREAMING_SNAKE_CASE__ = '''wavlm'''
    def __init__(self , UpperCAmelCase=3_2 , UpperCAmelCase=7_6_8 , UpperCAmelCase=1_2 , UpperCAmelCase=1_2 , UpperCAmelCase=3_0_7_2 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase="group" , UpperCAmelCase="gelu" , UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase=(1_0, 3, 3, 3, 3, 2, 2) , UpperCAmelCase=False , UpperCAmelCase=1_2_8 , UpperCAmelCase=1_6 , UpperCAmelCase=3_2_0 , UpperCAmelCase=8_0_0 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.05 , UpperCAmelCase=1_0 , UpperCAmelCase=2 , UpperCAmelCase=0.0 , UpperCAmelCase=1_0 , UpperCAmelCase=3_2_0 , UpperCAmelCase=2 , UpperCAmelCase=0.1 , UpperCAmelCase=1_0_0 , UpperCAmelCase=2_5_6 , UpperCAmelCase=2_5_6 , UpperCAmelCase=0.1 , UpperCAmelCase="mean" , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=2_5_6 , UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCAmelCase=(5, 3, 3, 1, 1) , UpperCAmelCase=(1, 2, 3, 1, 1) , UpperCAmelCase=5_1_2 , UpperCAmelCase=8_0 , UpperCAmelCase=0 , UpperCAmelCase=1 , UpperCAmelCase=2 , UpperCAmelCase=False , UpperCAmelCase=3 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=None , **UpperCAmelCase , ) -> Optional[Any]:
        super().__init__(**UpperCAmelCase , pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase )
        # Transformer / feature-extractor hyper-parameters (see review note above:
        # the right-hand names are undefined in this mangled copy).
        _lowercase =hidden_size
        _lowercase =feat_extract_norm
        _lowercase =feat_extract_activation
        _lowercase =list(UpperCAmelCase )
        _lowercase =list(UpperCAmelCase )
        _lowercase =list(UpperCAmelCase )
        _lowercase =conv_bias
        _lowercase =num_buckets
        _lowercase =max_bucket_distance
        _lowercase =num_conv_pos_embeddings
        _lowercase =num_conv_pos_embedding_groups
        _lowercase =len(self.conv_dim )
        _lowercase =num_hidden_layers
        _lowercase =intermediate_size
        _lowercase =hidden_act
        _lowercase =num_attention_heads
        _lowercase =hidden_dropout
        _lowercase =attention_dropout
        _lowercase =activation_dropout
        _lowercase =feat_proj_dropout
        _lowercase =final_dropout
        _lowercase =layerdrop
        _lowercase =layer_norm_eps
        _lowercase =initializer_range
        _lowercase =num_ctc_classes
        _lowercase =vocab_size
        _lowercase =do_stable_layer_norm
        _lowercase =use_weighted_layer_sum
        _lowercase =classifier_proj_size
        # Sanity check: the three conv-layer tuples must have the same length.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        _lowercase =apply_spec_augment
        _lowercase =mask_time_prob
        _lowercase =mask_time_length
        _lowercase =mask_time_min_masks
        _lowercase =mask_feature_prob
        _lowercase =mask_feature_length
        # parameters for pretraining with codevector quantized representations
        _lowercase =num_codevectors_per_group
        _lowercase =num_codevector_groups
        _lowercase =contrastive_logits_temperature
        _lowercase =num_negatives
        _lowercase =codevector_dim
        _lowercase =proj_codevector_dim
        _lowercase =diversity_loss_weight
        # ctc loss
        _lowercase =ctc_loss_reduction
        _lowercase =ctc_zero_infinity
        # adapter
        _lowercase =add_adapter
        _lowercase =adapter_kernel_size
        _lowercase =adapter_stride
        _lowercase =num_adapter_layers
        _lowercase =output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        _lowercase =classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        _lowercase =list(UpperCAmelCase )
        _lowercase =list(UpperCAmelCase )
        _lowercase =list(UpperCAmelCase )
        _lowercase =xvector_output_dim
    @property
    def __A (self ) -> int:
        # Product of all conv strides — presumably the upstream
        # `inputs_to_logits_ratio` property; confirm against the original source.
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 5 | 1 |
import os
def UpperCAmelCase_ ( ) -> int:
    """Project Euler 67: maximum top-to-bottom path sum through the triangle
    stored in ``triangle.txt`` next to this file (bottom-up dynamic programming).

    Fix: this zero-argument function referenced the undefined name
    ``__snake_case`` (for ``__file__``, the parsed number, the row list, and
    the DP operands), raising NameError; the correct identifiers are restored.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, '''triangle.txt''')

    with open(triangle_path) as f:
        triangle = f.readlines()

    # Parse each line into a row of ints.
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(''' '''):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    # In-place DP: every cell accumulates the best path sum reaching it from
    # its (up to) two parents in the previous row.
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number_a = a[i - 1][j] if j != len(a[i - 1]) else 0  # parent directly above (guard right edge)
            number_b = a[i - 1][j - 1] if j > 0 else 0  # parent above-left (guard left edge)
            a[i][j] += max(number_a, number_b)
    return max(a[-1])


if __name__ == "__main__":
    # Fix: the guard called the undefined name `solution`; the function above
    # is bound to `UpperCAmelCase_` in this file.
    print(UpperCAmelCase_())
| 5 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase):
    # Slow integration test for the Flax Stable Diffusion inpainting pipeline.
    #
    # NOTE(review): this block is machine-mangled and not runnable as-is: every
    # result is written to the single local `_lowercase` while subsequent lines
    # read the intended names (`pipeline`, `prompt`, `num_samples`, ...), and
    # the pipeline/shard calls pass the undefined name `UpperCAmelCase`.
    # Both methods are also named `__A` (presumably `tearDown` and the test
    # method upstream), so the second definition shadows the first.
    def __A (self ) -> List[Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def __A (self ) -> Optional[Any]:
        # Download the init image and mask, run 50 denoising steps across all
        # local devices, and compare one output slice against reference values.
        _lowercase =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''' )
        _lowercase =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
        _lowercase ='''xvjiarui/stable-diffusion-2-inpainting'''
        _lowercase , _lowercase =FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCAmelCase , safety_checker=UpperCAmelCase )
        _lowercase ='''Face of a yellow cat, high resolution, sitting on a park bench'''
        _lowercase =jax.random.PRNGKey(0 )
        _lowercase =5_0
        _lowercase =jax.device_count()
        _lowercase =num_samples * [prompt]
        _lowercase =num_samples * [init_image]
        _lowercase =num_samples * [mask_image]
        _lowercase , _lowercase , _lowercase =pipeline.prepare_inputs(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
        # shard inputs and rng
        _lowercase =replicate(UpperCAmelCase )
        _lowercase =jax.random.split(UpperCAmelCase , jax.device_count() )
        _lowercase =shard(UpperCAmelCase )
        _lowercase =shard(UpperCAmelCase )
        _lowercase =shard(UpperCAmelCase )
        _lowercase =pipeline(
            UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase )
        _lowercase =output.images.reshape(UpperCAmelCase , 5_1_2 , 5_1_2 , 3 )
        _lowercase =images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        _lowercase =jnp.asarray(jax.device_get(image_slice.flatten() ) )
        _lowercase =jnp.array(
            [0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
        print(f"output_slice: {output_slice}" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 5 | 1 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
UpperCAmelCase__ = logging.get_logger(__name__)
class lowerCamelCase__ ( lowerCAmelCase):
    """Deprecated alias kept for backward compatibility; use `CLIPImageProcessor`."""

    def __init__(self, *args, **kwargs) -> None:
        # Fixes: `*UpperCAmelCase, **UpperCAmelCase` duplicated the parameter
        # name (a SyntaxError), and the warning-category argument was the
        # undefined name `UpperCAmelCase` — upstream passes FutureWarning here.
        warnings.warn(
            '''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use CLIPImageProcessor instead.''' ,
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 5 |
import comet # From: unbabel-comet
import torch
import datasets
UpperCAmelCase__ = datasets.logging.get_logger(__name__)
UpperCAmelCase__ = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
UpperCAmelCase__ = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
UpperCAmelCase__ = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCamelCase__ ( datasets.Metric):
    # `datasets.Metric` wrapper around Unbabel COMET MT-evaluation models.
    #
    # NOTE(review): this block is machine-mangled: the third method repeats the
    # parameter name `UpperCAmelCase` (a SyntaxError), checkpoint/score results
    # are written to the local `_lowercase` while reads use `gpus`, `sources`,
    # `mean_score`, etc. (undefined), and all methods are named `__A`
    # (upstream: `_info`, `_download_and_prepare`, `_compute`).
    def __A (self ) -> Optional[int]:
        # Metric metadata: three string sequences (source, hypothesis, reference).
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''sources''': datasets.Value('''string''' , id='''sequence''' ),
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
                '''https://github.com/Unbabel/COMET''',
                '''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
                '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
            ] , )
    def __A (self , UpperCAmelCase ) -> Dict:
        # Download and load the requested COMET checkpoint ("default" maps to
        # the WMT20 DA estimator).
        if self.config_name == "default":
            _lowercase =comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) )
        else:
            _lowercase =comet.load_from_checkpoint(comet.download_model(self.config_name ) )
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=False ) -> int:
        # Score (src, mt, ref) triples; defaults to one GPU when CUDA is present.
        if gpus is None:
            _lowercase =1 if torch.cuda.is_available() else 0
        _lowercase ={'''src''': sources, '''mt''': predictions, '''ref''': references}
        _lowercase =[dict(zip(UpperCAmelCase , UpperCAmelCase ) ) for t in zip(*data.values() )]
        _lowercase , _lowercase =self.scorer.predict(UpperCAmelCase , gpus=UpperCAmelCase , progress_bar=UpperCAmelCase )
        return {"mean_score": mean_score, "scores": scores}
| 5 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure for Chinese-CLIP.
# Fixes: the original assigned the structure dict to a throwaway name, then
# overwrote it with plain lists in the optional-dependency branches, and the
# `_LazyModule(...)` call referenced the undefined name `_import_structure`
# (NameError at import time); the standard extend-the-dict pattern is restored.
_import_structure = {
    '''configuration_chinese_clip''': [
        '''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''ChineseCLIPConfig''',
        '''ChineseCLIPOnnxConfig''',
        '''ChineseCLIPTextConfig''',
        '''ChineseCLIPVisionConfig''',
    ],
    '''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''feature_extraction_chinese_clip'''] = ['''ChineseCLIPFeatureExtractor''']
    _import_structure['''image_processing_chinese_clip'''] = ['''ChineseCLIPImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_chinese_clip'''] = [
        '''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ChineseCLIPModel''',
        '''ChineseCLIPPreTrainedModel''',
        '''ChineseCLIPTextModel''',
        '''ChineseCLIPVisionModel''',
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that resolves attributes on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowerCamelCase__ ( lowerCAmelCase , lowerCAmelCase):
    # Variance-preserving SDE scheduler (score_sde_pytorch style).
    #
    # NOTE(review): machine-mangled and not runnable as-is: the `__init__`
    # parameters all share the name `UpperCAmelCase` (a SyntaxError), state is
    # written to the local `_lowercase` while reads use the intended names
    # (`t`, `score`, `x`, `std`, `drift`, ...), and both instance methods are
    # named `__A` (upstream: `set_timesteps` and `step_pred`), so the second
    # shadows the first. Restore from the upstream source before use.

    # Upstream this is the scheduler `order`.
    SCREAMING_SNAKE_CASE__ = 1
    @register_to_config
    def __init__(self , UpperCAmelCase=2_0_0_0 , UpperCAmelCase=0.1 , UpperCAmelCase=2_0 , UpperCAmelCase=1e-3 ) -> List[str]:
        # Lazily-initialised state; populated by the set_timesteps-style method.
        _lowercase =None
        _lowercase =None
        _lowercase =None
    def __A (self , UpperCAmelCase , UpperCAmelCase = None ) -> str:
        # Build the (descending) continuous timestep grid from 1 to sampling_eps.
        _lowercase =torch.linspace(1 , self.config.sampling_eps , UpperCAmelCase , device=UpperCAmelCase )
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None ) -> Optional[int]:
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        _lowercase =(
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        _lowercase =torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        _lowercase =std.flatten()
        while len(std.shape ) < len(score.shape ):
            _lowercase =std.unsqueeze(-1 )
        _lowercase =-score / std
        # compute
        _lowercase =-1.0 / len(self.timesteps )
        _lowercase =self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        _lowercase =beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            _lowercase =beta_t.unsqueeze(-1 )
        _lowercase =-0.5 * beta_t * x
        _lowercase =torch.sqrt(UpperCAmelCase )
        _lowercase =drift - diffusion**2 * score
        _lowercase =x + drift * dt
        # add noise
        _lowercase =randn_tensor(x.shape , layout=x.layout , generator=UpperCAmelCase , device=x.device , dtype=x.dtype )
        _lowercase =x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean
    def __len__(self ) -> str:
        # Length of the scheduler == configured number of training timesteps.
        return self.config.num_train_timesteps
| 5 | 1 |
from __future__ import annotations
# Candidate moves in [Δrow, Δcol] order. `action` stores the index of the move
# taken to enter a cell, which the backtracking phase inverts.
# Fix: the search function below reads the module-level name `DIRECTIONS`, but
# the list was assigned to a throwaway name (NameError); it is bound correctly.
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def UpperCAmelCase_ ( grid, init, goal, cost, heuristic ) -> tuple[list[list[int]], list[list[int]]]:
    """Best-first grid search guided by ``heuristic``; returns (path, action).

    Fixes vs. the original: all five parameters were named ``__snake_case``
    (duplicate parameter names are a SyntaxError), and every local was written
    to the single name ``_lowercase`` while reads used the real identifiers.

    ``path`` is the list of ``[row, col]`` cells from ``init`` to ``goal``;
    ``action`` is the per-cell index into ``DIRECTIONS`` of the entering move.

    Raises ValueError when the open list empties before reaching ``goal``.
    """
    # Closed set: 1 marks cells already expanded (or queued).
    closed = [[0 for col in range(len(grid[0]))] for row in range(len(grid))]
    closed[init[0]][init[1]] = 1
    # Action grid: index of the DIRECTIONS move used to enter each cell.
    action = [[0 for col in range(len(grid[0]))] for row in range(len(grid))]

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # set when the goal cell is expanded
    resign = False  # set if no expandable cell remains

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError('''Algorithm is unable to find solution''')
        else:  # expand the cheapest queued cell (sort ascending, pop the front)
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # try each valid move
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i

    # Walk backwards from the goal using the recorded actions.
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    # Reverse to get init -> goal order.
    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    # Demo: 5x6 grid, Manhattan-distance heuristic with an obstacle penalty.
    # Fixes vs. the original: heuristic cells and the result tuple were
    # assigned to a throwaway module name instead of `heuristic[i][j]` /
    # `path, action`, and the call used the undefined name `search` (the
    # search function above is bound to `UpperCAmelCase_`).
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = UpperCAmelCase_(grid, init, goal, cost, heuristic)

    print('''ACTION MAP''')
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
| 5 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def UpperCAmelCase_ ( __snake_case ) -> Optional[Any]:
    """Build a MobileViTConfig for the named checkpoint variant.

    NOTE(review): machine-mangled and not runnable as-is: the body reads
    `mobilevit_name`, `idalabel`, and returns `config`, none of which are
    defined here (the parameter is `__snake_case`, and every write goes to the
    local `_lowercase`). Restore from the upstream conversion script.
    """
    _lowercase =MobileViTConfig()
    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        _lowercase =[144, 192, 240]
        _lowercase =[16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        _lowercase =[96, 120, 144]
        _lowercase =[16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        _lowercase =[64, 80, 96]
        _lowercase =[16, 16, 24, 48, 64, 80, 320]
    # presumably the xxs dropout/expand-ratio overrides upstream — the mangled
    # indentation places them outside the elif; verify against the original.
    _lowercase =0.05
    _lowercase =2.0
    # Segmentation variants use PASCAL VOC labels; classification uses ImageNet-1k.
    if mobilevit_name.startswith('''deeplabv3_''' ):
        _lowercase =512
        _lowercase =16
        _lowercase =21
        _lowercase ='''pascal-voc-id2label.json'''
    else:
        _lowercase =1000
        _lowercase ='''imagenet-1k-id2label.json'''
    # Fetch the id->label mapping from the hub and attach it to the config.
    _lowercase ='''huggingface/label-files'''
    _lowercase =json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type='''dataset''' ) , '''r''' ) )
    _lowercase ={int(__snake_case ): v for k, v in idalabel.items()}
    _lowercase =idalabel
    _lowercase ={v: k for k, v in idalabel.items()}
    return config
def UpperCAmelCase_ ( name, base_model=False ) -> str:
    """Map an original MobileViT state-dict key to its HF transformers name.

    The replacements run in a fixed order; each is a no-op when its pattern is
    absent. Unless ``base_model`` is True, keys outside the segmentation and
    classifier heads are prefixed with ``"mobilevit."``.

    Fix: both parameters were named ``__snake_case`` (duplicate parameter names
    are a SyntaxError), and every rewritten key was assigned to the local
    ``_lowercase`` instead of ``name``.
    """
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")
    if "conv_1." in name:
        name = name.replace('''conv_1.''', '''conv_stem.''')
    if ".block." in name:
        name = name.replace('''.block.''', '''.''')
    if "exp_1x1" in name:
        name = name.replace('''exp_1x1''', '''expand_1x1''')
    if "red_1x1" in name:
        name = name.replace('''red_1x1''', '''reduce_1x1''')
    if ".local_rep.conv_3x3." in name:
        name = name.replace('''.local_rep.conv_3x3.''', '''.conv_kxk.''')
    if ".local_rep.conv_1x1." in name:
        name = name.replace('''.local_rep.conv_1x1.''', '''.conv_1x1.''')
    if ".norm." in name:
        name = name.replace('''.norm.''', '''.normalization.''')
    if ".conv." in name:
        name = name.replace('''.conv.''', '''.convolution.''')
    if ".conv_proj." in name:
        name = name.replace('''.conv_proj.''', '''.conv_projection.''')
    # Numbered sub-layers: stages 0-1 gain a ".layer." segment, stages 2-5
    # drop the inner index entirely.
    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")
    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
    if "expand_1x1" in name:
        name = name.replace('''expand_1x1''', '''downsampling_layer.expand_1x1''')
    if "conv_3x3" in name:
        name = name.replace('''conv_3x3''', '''downsampling_layer.conv_3x3''')
    if "reduce_1x1" in name:
        name = name.replace('''reduce_1x1''', '''downsampling_layer.reduce_1x1''')
    # The final global_rep entries are the transformer's layernorm.
    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", '''.layernorm.weight''')
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", '''.layernorm.bias''')
    if ".global_rep." in name:
        name = name.replace('''.global_rep.''', '''.transformer.''')
    if ".pre_norm_mha.0." in name:
        name = name.replace('''.pre_norm_mha.0.''', '''.layernorm_before.''')
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace('''.pre_norm_mha.1.out_proj.''', '''.attention.output.dense.''')
    if ".pre_norm_ffn.0." in name:
        name = name.replace('''.pre_norm_ffn.0.''', '''.layernorm_after.''')
    if ".pre_norm_ffn.1." in name:
        name = name.replace('''.pre_norm_ffn.1.''', '''.intermediate.dense.''')
    if ".pre_norm_ffn.4." in name:
        name = name.replace('''.pre_norm_ffn.4.''', '''.output.dense.''')
    if ".transformer." in name:
        name = name.replace('''.transformer.''', '''.transformer.layer.''')
    if ".aspp_layer." in name:
        name = name.replace('''.aspp_layer.''', '''.''')
    if ".aspp_pool." in name:
        name = name.replace('''.aspp_pool.''', '''.''')
    if "seg_head." in name:
        name = name.replace('''seg_head.''', '''segmentation_head.''')
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace('''segmentation_head.classifier.classifier.''', '''segmentation_head.classifier.''')
    if "classifier.fc." in name:
        name = name.replace('''classifier.fc.''', '''classifier.''')
    elif (not base_model) and ("segmentation_head." not in name):
        name = '''mobilevit.''' + name
    return name
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case=False ) -> Optional[Any]:
    """Rewrite an original MobileViT state dict in-place for the HF model,
    splitting fused qkv projections into query/key/value slices.

    NOTE(review): machine-mangled and not runnable as-is: the three parameters
    share the name `__snake_case` (duplicate parameter names are a
    SyntaxError), and the body reads `orig_state_dict`, `key`, `key_split`,
    `model`, `layer`, `val`, `dim`, none of which are bound here (all writes
    go to the local `_lowercase`). Restore from the upstream script.
    """
    if base_model:
        _lowercase =''''''
    else:
        _lowercase ='''mobilevit.'''
    for key in orig_state_dict.copy().keys():
        _lowercase =orig_state_dict.pop(__snake_case )
        if key[:8] == "encoder.":
            _lowercase =key[8:]
        if "qkv" in key:
            # Split the fused qkv tensor into the three per-projection slices.
            _lowercase =key.split('''.''' )
            _lowercase =int(key_split[0][6:] ) - 1
            _lowercase =int(key_split[3] )
            _lowercase =model.get_submodule(F"{model_prefix}encoder.layer.{layer_num}" )
            _lowercase =layer.transformer.layer[transformer_num].attention.attention.all_head_size
            _lowercase =(
                F"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                _lowercase =val[:dim, :]
                _lowercase =val[dim : dim * 2, :]
                _lowercase =val[-dim:, :]
            else:
                _lowercase =val[:dim]
                _lowercase =val[dim : dim * 2]
                _lowercase =val[-dim:]
        else:
            _lowercase =val
    return orig_state_dict
def UpperCAmelCase_ ( ):
    """Download the standard COCO cats test image used to sanity-check outputs.

    Fix: the original passed the undefined name ``__snake_case`` as the request
    URL and as ``stream=``; upstream uses the literal URL with ``stream=True``.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case=False ) -> int:
    """Convert an original MobileViT checkpoint to the HF format, verify its
    logits against hard-coded reference slices, save it, and optionally push
    to the hub.

    NOTE(review): machine-mangled and not runnable as-is: the four parameters
    share the name `__snake_case` (duplicate parameter names are a
    SyntaxError); the body calls `get_mobilevit_config`, `convert_state_dict`
    and `prepare_img`, whose definitions in this file were renamed away; and
    every result is written to `_lowercase` while reads use `mobilevit_name`,
    `model`, `config`, `outputs`, `expected_logits`, etc. Restore from the
    upstream conversion script.
    """
    _lowercase =get_mobilevit_config(__snake_case )
    # load original state_dict
    _lowercase =torch.load(__snake_case , map_location='''cpu''' )
    # load 🤗 model
    if mobilevit_name.startswith('''deeplabv3_''' ):
        _lowercase =MobileViTForSemanticSegmentation(__snake_case ).eval()
    else:
        _lowercase =MobileViTForImageClassification(__snake_case ).eval()
    _lowercase =convert_state_dict(__snake_case , __snake_case )
    model.load_state_dict(__snake_case )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    _lowercase =MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    _lowercase =image_processor(images=prepare_img() , return_tensors='''pt''' )
    _lowercase =model(**__snake_case )
    _lowercase =outputs.logits
    # Reference slices below are per checkpoint variant.
    if mobilevit_name.startswith('''deeplabv3_''' ):
        assert logits.shape == (1, 21, 32, 32)
        if mobilevit_name == "deeplabv3_mobilevit_s":
            _lowercase =torch.tensor(
                [
                    [[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
                    [[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
                    [[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            _lowercase =torch.tensor(
                [
                    [[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
                    [[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
                    [[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            _lowercase =torch.tensor(
                [
                    [[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
                    [[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
                    [[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
                ] )
        else:
            raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
        assert torch.allclose(logits[0, :3, :3, :3] , __snake_case , atol=1e-4 )
    else:
        assert logits.shape == (1, 1000)
        if mobilevit_name == "mobilevit_s":
            _lowercase =torch.tensor([-0.98_66, 0.23_92, -1.12_41] )
        elif mobilevit_name == "mobilevit_xs":
            _lowercase =torch.tensor([-2.47_61, -0.93_99, -1.95_87] )
        elif mobilevit_name == "mobilevit_xxs":
            _lowercase =torch.tensor([-1.93_64, -1.23_27, -0.46_53] )
        else:
            raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
        assert torch.allclose(logits[0, :3] , __snake_case , atol=1e-4 )
    # Persist the converted model and its image processor.
    Path(__snake_case ).mkdir(exist_ok=__snake_case )
    print(F"Saving model {mobilevit_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(__snake_case )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(__snake_case )
    if push_to_hub:
        _lowercase ={
            '''mobilevit_s''': '''mobilevit-small''',
            '''mobilevit_xs''': '''mobilevit-x-small''',
            '''mobilevit_xxs''': '''mobilevit-xx-small''',
            '''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
            '''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
            '''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
        }
        print('''Pushing to the hub...''' )
        _lowercase =model_mapping[mobilevit_name]
        image_processor.push_to_hub(__snake_case , organization='''apple''' )
        model.push_to_hub(__snake_case , organization='''apple''' )
if __name__ == "__main__":
    # Fixes vs. the original: the parser and parsed args were assigned to a
    # throwaway module name while being read as `parser`/`args`, and the final
    # call used the undefined name `convert_movilevit_checkpoint` (the
    # converter above is the last function bound to `UpperCAmelCase_`).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--mobilevit_name''',
        default='''mobilevit_s''',
        type=str,
        help=(
            '''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
            ''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
        ),
    )
    parser.add_argument(
        '''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )

    args = parser.parse_args()
    UpperCAmelCase_(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 5 | 1 |
import qiskit
def UpperCAmelCase_ ( num_qubits, num_classical_bits ) -> "qiskit.result.counts.Counts":
    """Measure qubit 0 of a fresh circuit 1000 times on the Aer simulator.

    Fixes vs. the original: both parameters were named ``__snake_case``
    (duplicate parameter names are a SyntaxError), and the circuit/job locals
    were written to one throwaway name while the reads used real identifiers.
    """
    simulator = qiskit.Aer.get_backend('''aer_simulator''')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(num_qubits, num_classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    # `single_qubit_measure` is undefined in this file; the function above is
    # bound to `UpperCAmelCase_`.
    print(f'''Total count for various states are: {UpperCAmelCase_(1, 1)}''')
| 5 |
import requests
from bsa import BeautifulSoup
def UpperCAmelCase_ ( __snake_case = "https://www.worldometers.info/coronavirus" ) -> dict:
    """Scrape worldometers and return {headline: value} COVID-19 statistics.

    Fix: the original wrote the soup and the key/value node lists to one
    throwaway local while reading ``soup``/``keys``/``values`` (NameError).
    """
    soup = BeautifulSoup(requests.get(__snake_case).text, '''html.parser''')
    keys = soup.findAll('''h1''')
    values = soup.findAll('''div''', {'''class''': '''maincounter-number'''})
    keys += soup.findAll('''span''', {'''class''': '''panel-title'''})
    values += soup.findAll('''div''', {'''class''': '''number-table-main'''})
    # Pair each headline with its counter, preserving page order.
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    # NOTE(review): the module imports BeautifulSoup from `bsa`; the package is
    # normally `bs4` — verify the import line at the top of the file.
    print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
    # `world_covidaa_stats` is undefined; the scraper above is `UpperCAmelCase_`.
    for key, value in UpperCAmelCase_().items():
        print(f'''{key}\n{value}\n''')
| 5 | 1 |
import os
# Precomputes a list of the 100 first triangular numbers
UpperCAmelCase__ = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def UpperCAmelCase_ ( ) -> int:
    """Project Euler 42: count 'triangle words' in ``words.txt`` (words whose
    letter-value sum, A=1..Z=26, is a triangular number).

    Fix: this zero-argument function referenced the undefined name
    ``__snake_case`` (for ``__file__``, ``ord(x)`` and the final ``len``),
    raising NameError; the correct identifiers are restored.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, '''words.txt''')

    with open(words_file_path) as f:
        words = f.readline()

    # Strip quotes, split on commas, map each word to its letter-value sum,
    # and keep only sums that appear in the precomputed triangular numbers.
    words = [word.strip('''"''') for word in words.strip('''\r\n''').split(''',''')]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    # `solution` is undefined here; the function above is bound to `UpperCAmelCase_`.
    print(UpperCAmelCase_())
| 5 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
UpperCAmelCase__ = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 | 1 |
from ..utils import DummyObject, requires_backends
class lowerCamelCase__ ( metaclass=lowerCAmelCase):
    # Dummy placeholder that raises a helpful error when flax/transformers are
    # missing. Fix (applies to all four dummy classes below): `*UpperCAmelCase,
    # **UpperCAmelCase` duplicated the parameter name in every method, which is
    # a SyntaxError; the standard `*args, **kwargs` signature is restored.
    # NOTE(review): both classmethods are named `__A` (upstream: `from_config`
    # and `from_pretrained`), so only the second binding survives.
    SCREAMING_SNAKE_CASE__ = ['''flax''', '''transformers''']

    def __init__(self, *args, **kwargs) -> int:
        requires_backends(self, ['''flax''', '''transformers'''])

    @classmethod
    def __A (cls, *args, **kwargs) -> List[Any]:
        requires_backends(cls, ['''flax''', '''transformers'''])

    @classmethod
    def __A (cls, *args, **kwargs) -> Union[str, Any]:
        requires_backends(cls, ['''flax''', '''transformers'''])


class lowerCamelCase__ ( metaclass=lowerCAmelCase):
    SCREAMING_SNAKE_CASE__ = ['''flax''', '''transformers''']

    def __init__(self, *args, **kwargs) -> Tuple:
        requires_backends(self, ['''flax''', '''transformers'''])

    @classmethod
    def __A (cls, *args, **kwargs) -> List[str]:
        requires_backends(cls, ['''flax''', '''transformers'''])

    @classmethod
    def __A (cls, *args, **kwargs) -> List[Any]:
        requires_backends(cls, ['''flax''', '''transformers'''])


class lowerCamelCase__ ( metaclass=lowerCAmelCase):
    SCREAMING_SNAKE_CASE__ = ['''flax''', '''transformers''']

    def __init__(self, *args, **kwargs) -> str:
        requires_backends(self, ['''flax''', '''transformers'''])

    @classmethod
    def __A (cls, *args, **kwargs) -> str:
        requires_backends(cls, ['''flax''', '''transformers'''])

    @classmethod
    def __A (cls, *args, **kwargs) -> int:
        requires_backends(cls, ['''flax''', '''transformers'''])


class lowerCamelCase__ ( metaclass=lowerCAmelCase):
    SCREAMING_SNAKE_CASE__ = ['''flax''', '''transformers''']

    def __init__(self, *args, **kwargs) -> Tuple:
        requires_backends(self, ['''flax''', '''transformers'''])

    @classmethod
    def __A (cls, *args, **kwargs) -> int:
        requires_backends(cls, ['''flax''', '''transformers'''])

    @classmethod
    def __A (cls, *args, **kwargs) -> int:
        requires_backends(cls, ['''flax''', '''transformers'''])
| 5 |
def UpperCAmelCase_ ( input_string: str ) -> str:
    """
    Return the longest palindromic substring of ``input_string`` using
    Manacher's algorithm (O(n) time).

    NOTE(review): the original signature took ``__snake_case`` while the body
    read ``input_string`` (a NameError), and an empty input raised IndexError;
    both fixed here.
    """
    if not input_string:
        return ""

    max_length = 0
    output_string = ""
    # Interleave '|' between the characters so even- and odd-length
    # palindromes are handled uniformly: "aba" -> "a|b|a".
    new_input_string = ""
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    new_input_string += input_string[-1]

    # [l, r] bounds the right-most palindromic substring explored so far.
    l, r = 0, 0
    # length[j]: length of the longest palindrome centred at index j.
    length = [1 for i in range(len(new_input_string))]
    start = 0
    for j in range(len(new_input_string)):
        # Reuse the mirror centre's answer when j lies inside [l, r].
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # If this palindrome ends after the previously explored end, update
        # the bounds to its span.
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # Track the first centre achieving the maximum length.
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # Recover the winning span and strip the '|' separators.
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 5 | 1 |
class lowerCamelCase__ :
    """
    Levenshtein edit distance between two words, computed either top-down
    (memoised recursion) or bottom-up (tabulated dynamic programming).

    NOTE(review): the obfuscated dump gave all three public methods the same
    name, duplicated parameter names (a SyntaxError), and collapsed the two
    word attributes into one; the names are restored from the ``__main__``
    callers (``min_dist_top_down`` / ``min_dist_bottom_up``) and the internal
    ``self.__min_dist_top_down_dp`` reference.
    """

    def __init__(self) -> None:
        # Scratch state shared with the top-down solver.
        self.worda: str = ""
        self.wordb: str = ""
        self.dp: list = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        """Edit distance between ``worda[:m+1]`` and ``wordb[:n+1]`` (memoised)."""
        if m == -1:
            # worda prefix exhausted: insert the remaining n+1 characters.
            return n + 1
        elif n == -1:
            # wordb prefix exhausted: delete the remaining m+1 characters.
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.worda[m] == self.wordb[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, worda: str, wordb: str) -> int:
        """Return the edit distance between ``worda`` and ``wordb`` (top-down)."""
        self.worda = worda
        self.wordb = wordb
        # dp[m][n] == -1 marks "not computed yet".
        self.dp = [[-1 for _ in range(len(wordb))] for _ in range(len(worda))]
        return self.__min_dist_top_down_dp(len(worda) - 1, len(wordb) - 1)

    def min_dist_bottom_up(self, worda: str, wordb: str) -> int:
        """Return the edit distance between ``worda`` and ``wordb`` (bottom-up)."""
        self.worda = worda
        self.wordb = wordb
        m = len(worda)
        n = len(wordb)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif worda[i - 1] == wordb[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
if __name__ == "__main__":
    # NOTE(review): the original instantiated an undefined ``EditDistance``
    # and compared the first string with itself; the class above is named
    # ``lowerCamelCase__`` and two distinct inputs are read here.
    solver = lowerCamelCase__()

    print('''****************** Testing Edit Distance DP Algorithm ******************''')
    print()

    Sa = input('''Enter the first string: ''').strip()
    Sb = input('''Enter the second string: ''').strip()

    print()
    print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sb)}''')
    print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sb)}''')
    print()
    print('''*************** End of Testing Edit Distance DP Algorithm ***************''')
| 5 |
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """
    Return all primes strictly below ``max_number`` (sieve of Eratosthenes).

    NOTE(review): the original inner ``range`` stepped by ``max_number``
    instead of ``i``, so only ``i**2`` was ever marked composite; it was also
    named identically to the solver below, which shadowed it.
    """
    if max_number < 2:
        return []
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # Mark every multiple of i, starting from i*i, as composite.
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def UpperCAmelCase_ ( __snake_case = 10**8 ) -> int:
    """
    Count the semiprimes (products p * q of two primes, p <= q) strictly
    below ``__snake_case`` (Project Euler 187).
    """
    # ``// 2 + 1`` so the largest possible cofactor (e.g. q = limit // 2 for
    # p = 2 and an odd limit) is included; oversized products are discarded
    # by the inner loop below.
    prime_numbers = calculate_prime_numbers(__snake_case // 2 + 1)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    # Two-pointer sweep: for each smallest factor ``left``, shrink ``right``
    # until the product drops below the limit; every pair in between counts.
    while left <= right:
        # Guard ``left <= right`` so ``right`` cannot underflow past ``left``.
        while left <= right and prime_numbers[left] * prime_numbers[right] >= __snake_case:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count


if __name__ == "__main__":
    # NOTE(review): the original printed an undefined ``solution()``.
    print(f'''{UpperCAmelCase_() = }''')
| 5 | 1 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
UpperCAmelCase__ = logging.get_logger(__name__)
# Model-type -> Flax class-name mappings.
# NOTE(review): the obfuscated dump bound every mapping to ``UpperCAmelCase__``
# while the ``_LazyAutoMapping`` declarations below reference the real
# constant names; the names those declarations use are restored here, in the
# same order as the original assignments.
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
    [
        # Base model mapping
        ('''albert''', '''FlaxAlbertModel'''),
        ('''bart''', '''FlaxBartModel'''),
        ('''beit''', '''FlaxBeitModel'''),
        ('''bert''', '''FlaxBertModel'''),
        ('''big_bird''', '''FlaxBigBirdModel'''),
        ('''blenderbot''', '''FlaxBlenderbotModel'''),
        ('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
        ('''clip''', '''FlaxCLIPModel'''),
        ('''distilbert''', '''FlaxDistilBertModel'''),
        ('''electra''', '''FlaxElectraModel'''),
        ('''gpt-sw3''', '''FlaxGPT2Model'''),
        ('''gpt2''', '''FlaxGPT2Model'''),
        ('''gpt_neo''', '''FlaxGPTNeoModel'''),
        ('''gptj''', '''FlaxGPTJModel'''),
        ('''longt5''', '''FlaxLongT5Model'''),
        ('''marian''', '''FlaxMarianModel'''),
        ('''mbart''', '''FlaxMBartModel'''),
        ('''mt5''', '''FlaxMT5Model'''),
        ('''opt''', '''FlaxOPTModel'''),
        ('''pegasus''', '''FlaxPegasusModel'''),
        ('''regnet''', '''FlaxRegNetModel'''),
        ('''resnet''', '''FlaxResNetModel'''),
        ('''roberta''', '''FlaxRobertaModel'''),
        ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
        ('''roformer''', '''FlaxRoFormerModel'''),
        ('''t5''', '''FlaxT5Model'''),
        ('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
        ('''vit''', '''FlaxViTModel'''),
        ('''wav2vec2''', '''FlaxWav2Vec2Model'''),
        ('''whisper''', '''FlaxWhisperModel'''),
        ('''xglm''', '''FlaxXGLMModel'''),
        ('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
    ]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
    [
        # Model for pre-training mapping
        ('''albert''', '''FlaxAlbertForPreTraining'''),
        ('''bart''', '''FlaxBartForConditionalGeneration'''),
        ('''bert''', '''FlaxBertForPreTraining'''),
        ('''big_bird''', '''FlaxBigBirdForPreTraining'''),
        ('''electra''', '''FlaxElectraForPreTraining'''),
        ('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
        ('''mbart''', '''FlaxMBartForConditionalGeneration'''),
        ('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
        ('''roberta''', '''FlaxRobertaForMaskedLM'''),
        ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
        ('''roformer''', '''FlaxRoFormerForMaskedLM'''),
        ('''t5''', '''FlaxT5ForConditionalGeneration'''),
        ('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
        ('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
        ('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
    ]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Masked LM mapping
        ('''albert''', '''FlaxAlbertForMaskedLM'''),
        ('''bart''', '''FlaxBartForConditionalGeneration'''),
        ('''bert''', '''FlaxBertForMaskedLM'''),
        ('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
        ('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
        ('''electra''', '''FlaxElectraForMaskedLM'''),
        ('''mbart''', '''FlaxMBartForConditionalGeneration'''),
        ('''roberta''', '''FlaxRobertaForMaskedLM'''),
        ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
        ('''roformer''', '''FlaxRoFormerForMaskedLM'''),
        ('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
    ]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        ('''bart''', '''FlaxBartForConditionalGeneration'''),
        ('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
        ('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
        ('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
        ('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
        ('''marian''', '''FlaxMarianMTModel'''),
        ('''mbart''', '''FlaxMBartForConditionalGeneration'''),
        ('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
        ('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
        ('''t5''', '''FlaxT5ForConditionalGeneration'''),
    ]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classsification
        ('''beit''', '''FlaxBeitForImageClassification'''),
        ('''regnet''', '''FlaxRegNetForImageClassification'''),
        ('''resnet''', '''FlaxResNetForImageClassification'''),
        ('''vit''', '''FlaxViTForImageClassification'''),
    ]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
    ]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Causal LM mapping
        ('''bart''', '''FlaxBartForCausalLM'''),
        ('''bert''', '''FlaxBertForCausalLM'''),
        ('''big_bird''', '''FlaxBigBirdForCausalLM'''),
        ('''electra''', '''FlaxElectraForCausalLM'''),
        ('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
        ('''gpt2''', '''FlaxGPT2LMHeadModel'''),
        ('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
        ('''gptj''', '''FlaxGPTJForCausalLM'''),
        ('''opt''', '''FlaxOPTForCausalLM'''),
        ('''roberta''', '''FlaxRobertaForCausalLM'''),
        ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
        ('''xglm''', '''FlaxXGLMForCausalLM'''),
        ('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
    ]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Sequence Classification mapping
        ('''albert''', '''FlaxAlbertForSequenceClassification'''),
        ('''bart''', '''FlaxBartForSequenceClassification'''),
        ('''bert''', '''FlaxBertForSequenceClassification'''),
        ('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
        ('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
        ('''electra''', '''FlaxElectraForSequenceClassification'''),
        ('''mbart''', '''FlaxMBartForSequenceClassification'''),
        ('''roberta''', '''FlaxRobertaForSequenceClassification'''),
        ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
        ('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
        ('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
    ]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
    [
        # Model for Question Answering mapping
        ('''albert''', '''FlaxAlbertForQuestionAnswering'''),
        ('''bart''', '''FlaxBartForQuestionAnswering'''),
        ('''bert''', '''FlaxBertForQuestionAnswering'''),
        ('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
        ('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
        ('''electra''', '''FlaxElectraForQuestionAnswering'''),
        ('''mbart''', '''FlaxMBartForQuestionAnswering'''),
        ('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
        ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
        ('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
        ('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
    ]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Token Classification mapping
        ('''albert''', '''FlaxAlbertForTokenClassification'''),
        ('''bert''', '''FlaxBertForTokenClassification'''),
        ('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
        ('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
        ('''electra''', '''FlaxElectraForTokenClassification'''),
        ('''roberta''', '''FlaxRobertaForTokenClassification'''),
        ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
        ('''roformer''', '''FlaxRoFormerForTokenClassification'''),
        ('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
    ]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
    [
        # Model for Multiple Choice mapping
        ('''albert''', '''FlaxAlbertForMultipleChoice'''),
        ('''bert''', '''FlaxBertForMultipleChoice'''),
        ('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
        ('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
        ('''electra''', '''FlaxElectraForMultipleChoice'''),
        ('''roberta''', '''FlaxRobertaForMultipleChoice'''),
        ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
        ('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
        ('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
    ]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
    [
        ('''bert''', '''FlaxBertForNextSentencePrediction'''),
    ]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
        ('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
    ]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        ('''whisper''', '''FlaxWhisperForAudioClassification'''),
    ]
)
# Lazy config-class -> model-class mappings.
# NOTE(review): restored the LHS names that the auto classes below reference
# (the obfuscated dump bound every mapping to ``UpperCAmelCase__``).
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
# NOTE(review): the obfuscated dump named every class ``lowerCamelCase__`` and
# bound every ``auto_class_update`` result to ``UpperCAmelCase__``, while the
# update calls referenced the real Flax auto-class names; those names are
# restored below so each class/update pair is coherent.
class FlaxAutoModel(_BaseAutoModelClass):
    SCREAMING_SNAKE_CASE__ = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    SCREAMING_SNAKE_CASE__ = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    SCREAMING_SNAKE_CASE__ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    SCREAMING_SNAKE_CASE__ = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')


class FlaxAutoModelForSeqaSeqLM(_BaseAutoModelClass):
    SCREAMING_SNAKE_CASE__ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeqaSeqLM = auto_class_update(
    FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    SCREAMING_SNAKE_CASE__ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    SCREAMING_SNAKE_CASE__ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    SCREAMING_SNAKE_CASE__ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    SCREAMING_SNAKE_CASE__ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    SCREAMING_SNAKE_CASE__ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    SCREAMING_SNAKE_CASE__ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='''image classification'''
)


class FlaxAutoModelForVisionaSeq(_BaseAutoModelClass):
    SCREAMING_SNAKE_CASE__ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVisionaSeq = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''')


class FlaxAutoModelForSpeechSeqaSeq(_BaseAutoModelClass):
    SCREAMING_SNAKE_CASE__ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeqaSeq = auto_class_update(
    FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
| 5 |
# Character -> Morse sequence, per ITU-R M.1677-1.
# NOTE(review): restored the names ``MORSE_CODE_DICT``/``encrypt``/``decrypt``
# /``main`` that the code below references (the obfuscated dump bound them all
# to ``UpperCAmelCase__``/``UpperCAmelCase_``, shadowing each other).
MORSE_CODE_DICT = {
    '''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
    '''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
    '''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
    '''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
    '''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
    '''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
    ''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
    '''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
    '''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on
# Reverse lookup: Morse sequence -> character.
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    """Encode ``message`` into space-separated Morse code (case-insensitive)."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Decode space-separated Morse code back into plain (upper-case) text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    """Round-trip a demo message through encrypt/decrypt, printing each step."""
    message = '''Morse code here!'''
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
| 5 | 1 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester:
    """Builds SqueezeBert configs/inputs and checks each task head's output
    shapes.

    NOTE(review): the obfuscated dump duplicated every parameter name (a
    SyntaxError), named every method ``__A`` and every assignment target
    ``_lowercase``; names are restored from the body's own references and the
    ``SqueezeBertModelTest`` callers (which already refer to this class as
    ``SqueezeBertModelTester``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Build a SqueezeBertConfig from the tester's hyper-parameters."""
        return SqueezeBertConfig(
            embedding_size=self.hidden_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, attention_probs_dropout_prob=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, q_groups=self.q_groups, k_groups=self.k_groups, v_groups=self.v_groups, post_attention_groups=self.post_attention_groups, intermediate_groups=self.intermediate_groups, output_groups=self.output_groups, )

    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the base model's last_hidden_state shape."""
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the masked-LM head's logits shape."""
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the QA head's start/end logits shapes."""
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the sequence-classification head's logits shape."""
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the token-classification head's logits shape."""
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the multiple-choice head's logits shape."""
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Duplicate each example across the choice dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) as expected by the common test mixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Runs the common model and pipeline test suites against SqueezeBert.

    NOTE(review): the obfuscated dump duplicated the base class, collapsed
    five distinct class attributes into one repeated name, and named every
    test method ``__A`` (so unittest would never discover them); restored the
    mixin-expected attribute names and ``test_*`` method names.
    """

    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': SqueezeBertModel,
            '''fill-mask''': SqueezeBertForMaskedLM,
            '''question-answering''': SqueezeBertForQuestionAnswering,
            '''text-classification''': SqueezeBertForSequenceClassification,
            '''token-classification''': SqueezeBertForTokenClassification,
            '''zero-shot''': SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): boolean flag names follow the ModelTesterMixin convention;
    # their values (False/True/False) are preserved from the original.
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the published squeezebert-mnli checkpoint."""

    @slow
    def test_inference_classification_head(self):
        # NOTE(review): the original method was named ``__A`` and so would
        # never be discovered by unittest; renamed with the ``test_`` prefix.
        model = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''')

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        # Reference MNLI logits for the fixed input above.
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 5 |
from typing import Any
def UpperCAmelCase_ (
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> list:
    """
    Viterbi algorithm: return the most likely sequence of hidden states for
    the given observation sequence.

    NOTE(review): the obfuscated dump duplicated the parameter names (a
    SyntaxError) and collapsed every ``probabilities[...]``/``pointers[...]``
    subscript assignment into a bare ``_lowercase = ...``; both dictionaries'
    writes are restored from the surviving read sites.

    Raises:
        ValueError: if any argument is empty or of the wrong shape/type.
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # probabilities[(state, observation)]: best path probability ending in
    # ``state`` at ``observation``; pointers[(state, observation)]: the
    # predecessor state on that best path.
    probabilities = {}
    pointers = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ''''''
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ''''''
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate every Viterbi argument, raising ValueError on the first problem.

    NOTE(review): restored the helper name ``_validation`` referenced by the
    Viterbi function above (the dump renamed every helper to the same name,
    shadowing it).
    """
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> None:
"""simple docstring"""
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def UpperCAmelCase_(observations_space, states_space) -> None:
    """Check that both spaces are lists of strings (duplicate-parameter fix)."""
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


_validate_lists = UpperCAmelCase_
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> None:
"""simple docstring"""
if not isinstance(_object , __snake_case ):
_lowercase =F"{var_name} must be a list"
raise ValueError(__snake_case )
else:
for x in _object:
if not isinstance(__snake_case , __snake_case ):
_lowercase =F"{var_name} must be a list of strings"
raise ValueError(__snake_case )
def UpperCAmelCase_(
    initial_probabilities,
    transition_probabilities,
    emission_probabilities,
) -> None:
    """Type-check the three probability tables (duplicate-parameter fix)."""
    # Initial probabilities map state -> float; the other two are nested dicts.
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


_validate_dicts = UpperCAmelCase_
def UpperCAmelCase_(_object, var_name) -> None:
    """Check a dict of dicts whose inner values are floats."""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


_validate_nested_dict = UpperCAmelCase_
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case = False ) -> None:
"""simple docstring"""
if not isinstance(_object , __snake_case ):
_lowercase =F"{var_name} must be a dict"
raise ValueError(__snake_case )
if not all(isinstance(__snake_case , __snake_case ) for x in _object ):
_lowercase =F"{var_name} all keys must be strings"
raise ValueError(__snake_case )
if not all(isinstance(__snake_case , __snake_case ) for x in _object.values() ):
_lowercase ='''nested dictionary ''' if nested else ''''''
_lowercase =F"{var_name} {nested_text}all values must be {value_type.__name__}"
raise ValueError(__snake_case )
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    from doctest import testmod
    testmod()
| 5 | 1 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # NOTE(review): every assignment target in the transformed copy was collapsed
    # into one placeholder while the reads kept the original names (X, young,
    # middle_aged, ...), so nothing was defined when read. Names restored from
    # the read sites; behavior otherwise unchanged.
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
    # max-min composition
    # max-product composition
    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()
    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)
    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)
    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)
    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)
    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)
    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)
    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)
    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)
    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)
    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)
    plt.subplots_adjust(hspace=0.5)
    plt.show()
| 5 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# The second placeholder assignment clobbered the logger in the transformed copy,
# while the config class below reads `logger`; both names are restored.
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
# Keep the transformed file's placeholder bound to its last value (the map).
UpperCAmelCase__ = ESM_PRETRAINED_CONFIG_ARCHIVE_MAP
class lowerCamelCase__ ( lowerCAmelCase):
    """Configuration for an ESM model, optionally carrying ESMFold settings.

    NOTE(review): the transformed copy collapsed every ``self.<attr> = ...``
    assignment into a throwaway local, leaving instances without the attributes
    that ``to_dict`` and the folding branch read; the attribute assignments are
    restored from the parameter reads below.
    """

    # HF model-type identifier; the transformed placeholder stays bound for compat.
    model_type = "esm"
    SCREAMING_SNAKE_CASE__ = model_type

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('''No esmfold_config supplied for folding model, using default values.''')
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''')
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, '''use_esm_attn_map''', False):
            raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''')

    def to_dict(self):
        """Serialize to a plain dict, expanding a nested EsmFoldConfig if present."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


# Restore the conventional class name referenced elsewhere.
EsmConfig = lowerCamelCase__
@dataclass
class lowerCamelCase__:
    """ESMFold-specific settings.

    NOTE(review): the transformed copy collapsed every field name into one
    placeholder (leaving a single surviving field) while ``__post_init__`` still
    read ``self.trunk``; field names restored from the defaults and reads.
    """

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Accept a missing trunk (use defaults) or a dict form of the config.
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serialize to a dict, expanding the nested trunk config."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


# Restore the class name used by EsmConfig above.
EsmFoldConfig = lowerCamelCase__
@dataclass
class lowerCamelCase__:
    """Hyperparameters of the ESMFold trunk, with shape validation.

    NOTE(review): field names were collapsed to one placeholder in the
    transformed copy; restored from the defaults and the attributes that
    ``__post_init__`` reads.
    """

    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # Bug fix: the original compared each state dim against itself (x % x is
        # always 0, making the check vacuous); the divisibility check is against
        # the corresponding head width.
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                f" {self.sequence_state_dim} and {self.sequence_head_width}.")
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}.")
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.")
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.")
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        """Serialize to a dict, expanding the nested structure-module config."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


# Restore the class name used by EsmFoldConfig above.
TrunkConfig = lowerCamelCase__
@dataclass
class lowerCamelCase__:
    """Hyperparameters of the ESMFold structure module.

    NOTE(review): field names were collapsed to one placeholder in the
    transformed copy (only the last default survived); restored here.
    """

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        """Serialize to a plain dict."""
        return asdict(self)


# Restore the class name used by TrunkConfig above.
StructureModuleConfig = lowerCamelCase__
def UpperCAmelCase_() -> tuple:
    """Return the default ESM-2 vocabulary as a tuple of token strings."""
    special_prefix = ("<cls>", "<pad>", "<eos>", "<unk>")
    amino_acids = tuple("LAGVSERTIDPKQNFYMHWCXBUZO")
    special_suffix = (".", "-", "<null_1>", "<mask>")
    return special_prefix + amino_acids + special_suffix


# Restore the name that EsmConfig.__init__ calls.
get_default_vocab_list = UpperCAmelCase_
| 5 | 1 |
def UpperCAmelCase_(x, y):
    """Return (length, subsequence) of the longest common subsequence of x and y.

    Bug fix: the transformed copy assigned the DP table and its cell updates to a
    throwaway local while reading `l`, so the table was never filled; the DP
    writes and the traceback locals are restored.
    """
    assert x is not None
    assert y is not None
    m = len(x)
    n = len(y)
    # dp table: l[i][j] = LCS length of x[:i] and y[:j]
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)
    # Walk back through the table to reconstruct one LCS.
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq


# Restore the public name used by the __main__ demo below.
longest_common_subsequence = UpperCAmelCase_
if __name__ == "__main__":
    # NOTE(review): the transformed copy assigned all four demo values to one
    # placeholder while the call below read a/b; names restored from the reads.
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest
    doctest.testmod()
| 5 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
# Strings that open a new top-level block; generation past a completed function
# is trimmed at these markers (see remove_last_block / EndOfFunctionCriteria).
UpperCAmelCase__ = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
class lowerCamelCase__ ( lowerCAmelCase):
    """Iterable dataset that tokenizes HumanEval prompts and yields each task
    `n_copies` times.

    NOTE(review): the transformed copy duplicated one placeholder name for all
    __init__ parameters (a SyntaxError) and dropped the ``self.`` attribute
    assignments that ``__iter__`` reads; both are restored.
    """

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors='''pt''')
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


# Restore the class name used by main() below.
TokenizedDataset = lowerCamelCase__
class lowerCamelCase__ ( lowerCAmelCase):
    """Stopping criterion: every sequence has produced one of the EOF strings
    after `start_length` tokens.

    NOTE(review): restores distinct __init__ parameter names and the ``self.``
    attribute assignments that ``__call__`` reads.
    """

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        # Only inspect the freshly generated suffix of each sequence.
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)


# Restore the class name used by main() below.
EndOfFunctionCriteria = lowerCamelCase__
def UpperCAmelCase_(string):
    """Drop the trailing incomplete block of a generated completion.

    Bug fix: the transformed copy joined the characters of `string` itself into
    the split pattern; the intent (per the callers and the module-level constant)
    is to split on the EOF stop strings defined at the top of the file.
    """
    # UpperCAmelCase__ is this module's list of EOF stop strings.
    string_list = re.split("(%s)" % "|".join(UpperCAmelCase__), string)
    # last string should be ""
    return "".join(string_list[:-2])


# Restore the name used by complete_code below.
remove_last_block = UpperCAmelCase_
def UpperCAmelCase_(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate `batch_size` completions per HumanEval task and return them
    grouped per task (list of lists of code strings).

    NOTE(review): parameter and local names restored from the surviving reads
    (the transformed copy collapsed every assignment target into one local).
    """
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            # Stop strings are only searched after the prompt tokens.
            gen_kwargs["stopping_criteria"][0].start_length = batch['''ids'''].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch['''ids'''][:, : batch['''input_len''']], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch['''task_id'''].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens


# Restore the name used by main() below.
complete_code = UpperCAmelCase_
def UpperCAmelCase_() -> None:
    """Generate HumanEval completions with accelerate and score them with the
    `code_eval` metric.

    NOTE(review): local names restored from the surviving attribute reads
    (`args.*`, `accelerator`, `tokenizer`, ...); behavior otherwise as written.
    """
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = '''false'''
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
    # Generation settings
    gen_kwargs = {
        '''do_sample''': args.do_sample,
        '''temperature''': args.temperature,
        '''max_new_tokens''': args.max_new_tokens,
        '''top_p''': args.top_p,
        '''top_k''': args.top_k,
        # UpperCAmelCase__ is the module-level list of EOF stop strings.
        '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0, UpperCAmelCase__, tokenizer)]),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset('''openai_humaneval''')
    code_eval_metric = load_metric('''code_eval''')
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''])
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval['''test'''], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[''''''], predictions=[['''''']])
    except ValueError as exception:
        print(
            '''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
            ''' flag to enable code evaluation.''')
        raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)
    generations = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval['''test'''][task]['''test''']
            entry_point = F"check({human_eval['test'][task]['entry_point']})"
            references.append('''\n''' + test_func + '''\n''' + entry_point)
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(F"Results: {pass_at_k}")
        # Save results to json file
        with open(args.output_file, '''w''') as fp:
            json.dump(pass_at_k, fp)


# Restore the entry-point name used by the __main__ guard below.
main = UpperCAmelCase_
# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
| 5 | 1 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> Dict:
"""simple docstring"""
_lowercase =0
if start < end:
_lowercase =randint(__snake_case , __snake_case )
_lowercase =a[end]
_lowercase =a[pivot]
_lowercase =temp
_lowercase , _lowercase =_in_place_partition(__snake_case , __snake_case , __snake_case )
count += _in_place_quick_sort(__snake_case , __snake_case , p - 1 )
count += _in_place_quick_sort(__snake_case , p + 1 , __snake_case )
return count
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> Union[str, Any]:
"""simple docstring"""
_lowercase =0
_lowercase =randint(__snake_case , __snake_case )
_lowercase =a[end]
_lowercase =a[pivot]
_lowercase =temp
_lowercase =start - 1
for index in range(__snake_case , __snake_case ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
_lowercase =new_pivot_index + 1
_lowercase =a[new_pivot_index]
_lowercase =a[index]
_lowercase =temp
_lowercase =a[new_pivot_index + 1]
_lowercase =a[end]
_lowercase =temp
return new_pivot_index + 1, count
# NOTE(review): the transformed copy assigned every demo value to one
# placeholder while the reads used the original names (outfile, X, M, r, z);
# names restored from the read sites.
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    '''No of Comparisons for 100 elements selected from a standard normal distribution'''
    '''is :'''
)
print(z)
| 5 |
# The ideal-gas functions below read UNIVERSAL_GAS_CONSTANT; restore that name
# while keeping the transformed placeholder bound for compatibility.
UNIVERSAL_GAS_CONSTANT = 8.31_44_62  # Unit - J mol-1 K-1
UpperCAmelCase__ = UNIVERSAL_GAS_CONSTANT
def UpperCAmelCase_(moles: float, kelvin: float, volume: float) -> float:
    """Return the pressure of an ideal gas via PV = nRT.

    Restores distinct parameter names (the transformed copy duplicated one
    placeholder for all three, a SyntaxError); the body already read
    moles/kelvin/volume.
    """
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError('''Invalid inputs. Enter positive value.''')
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


pressure_of_gas_system = UpperCAmelCase_
def UpperCAmelCase_(moles: float, kelvin: float, pressure: float) -> float:
    """Return the volume of an ideal gas via V = nRT / P.

    Restores distinct parameter names (duplicate-placeholder SyntaxError fix).
    """
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError('''Invalid inputs. Enter positive value.''')
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


volume_of_gas_system = UpperCAmelCase_
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    from doctest import testmod
    testmod()
| 5 | 1 |
from torch import nn
def UpperCAmelCase_ ( __snake_case ) -> Any:
"""simple docstring"""
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F"Unsupported activation function: {act_fn}" )
| 5 |
from __future__ import annotations
from collections.abc import Callable
UpperCAmelCase__ = list[list[float | int]]
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> Matrix:
"""simple docstring"""
_lowercase =len(__snake_case )
_lowercase =[[0 for _ in range(size + 1 )] for _ in range(__snake_case )]
_lowercase =42
_lowercase =42
_lowercase =42
_lowercase =42
_lowercase =42
_lowercase =42
for row in range(__snake_case ):
for col in range(__snake_case ):
_lowercase =matrix[row][col]
_lowercase =vector[row][0]
_lowercase =0
_lowercase =0
while row < size and col < size:
# pivoting
_lowercase =max((abs(augmented[rowa][col] ), rowa) for rowa in range(__snake_case , __snake_case ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_lowercase , _lowercase =augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , __snake_case ):
_lowercase =augmented[rowa][col] / augmented[row][col]
_lowercase =0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , __snake_case ):
for row in range(__snake_case ):
_lowercase =augmented[row][col] / augmented[col][col]
for cola in range(__snake_case , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__snake_case )
]
def UpperCAmelCase_ ( __snake_case ) -> Callable[[int], int]:
"""simple docstring"""
_lowercase =len(__snake_case )
_lowercase =[[0 for _ in range(__snake_case )] for _ in range(__snake_case )]
_lowercase =[[0] for _ in range(__snake_case )]
_lowercase =42
_lowercase =42
_lowercase =42
_lowercase =42
for x_val, y_val in enumerate(__snake_case ):
for col in range(__snake_case ):
_lowercase =(x_val + 1) ** (size - col - 1)
_lowercase =y_val
_lowercase =solve(__snake_case , __snake_case )
def interpolated_func(__snake_case ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(__snake_case ) )
return interpolated_func
def UpperCAmelCase_ ( __snake_case ) -> int:
"""simple docstring"""
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def UpperCAmelCase_ ( __snake_case = question_function , __snake_case = 10 ) -> int:
"""simple docstring"""
_lowercase =[func(__snake_case ) for x_val in range(1 , order + 1 )]
_lowercase =[
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_lowercase =0
_lowercase =42
_lowercase =42
for poly in polynomials:
_lowercase =1
while func(__snake_case ) == poly(__snake_case ):
x_val += 1
ret += poly(__snake_case )
return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
| 5 | 1 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        '''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
    )
    # Fall back to the slow tokenizer when `tokenizers` is unavailable.
    LlamaTokenizerFast = None

# NOTE(review): both maps were assigned to one placeholder in the transformed
# copy while write_model reads NUM_SHARDS; names restored from the read sites.
INTERMEDIATE_SIZE_MAP = {
    "7B": 1_1008,
    "13B": 1_3824,
    "30B": 1_7920,
    "65B": 2_2016,
    "70B": 2_8672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def UpperCAmelCase_(n, ffn_dim_multiplier=1, multiple_of=256):
    """Return the LLaMA MLP intermediate size for hidden size `n`, rounded up to
    a multiple of `multiple_of` after applying the 8/3 expansion and multiplier.

    Restores distinct parameter names (duplicate-placeholder SyntaxError fix);
    the body already read ffn_dim_multiplier/multiple_of/n.
    """
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


# Restore the name used by main() below.
compute_intermediate_size = UpperCAmelCase_
def UpperCAmelCase_(path):
    """Load and return the JSON document stored at `path`.

    Bug fix: the transformed copy passed the path (not the file handle) to
    json.load.
    """
    with open(path, "r") as f:
        return json.load(f)


read_json = UpperCAmelCase_
def UpperCAmelCase_(text, path):
    """Serialize `text` as JSON to `path`.

    Restores distinct parameter names (duplicate-placeholder SyntaxError fix)
    and dumps the payload into the opened handle.
    """
    with open(path, "w") as f:
        json.dump(text, f)


write_json = UpperCAmelCase_
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case=True ) -> Optional[int]:
    """Convert a LLaMA reference checkpoint into the Hugging Face Transformers layout.

    NOTE(review): in this transformed copy every assignment target was collapsed
    into the placeholder ``_lowercase`` and all four parameters share one name
    (a SyntaxError), so the function cannot run as written. The reads below
    (``model_path``, ``input_base_path``, ``model_size``, ``params``,
    ``state_dict``, ...) show the names the original code bound. Left
    byte-identical apart from comments pending a full restoration.
    """
    os.makedirs(__snake_case , exist_ok=__snake_case )
    _lowercase =os.path.join(__snake_case , '''tmp''' )
    os.makedirs(__snake_case , exist_ok=__snake_case )
    _lowercase =read_json(os.path.join(__snake_case , '''params.json''' ) )
    _lowercase =NUM_SHARDS[model_size]
    _lowercase =params['''n_layers''']
    _lowercase =params['''n_heads''']
    _lowercase =n_heads // num_shards
    _lowercase =params['''dim''']
    _lowercase =dim // n_heads
    # Rotary-embedding base frequency and inverse-frequency table.
    _lowercase =1_00_00.0
    _lowercase =1.0 / (base ** (torch.arange(0 , __snake_case , 2 ).float() / dims_per_head))
    if "n_kv_heads" in params:
        _lowercase =params['''n_kv_heads'''] # for GQA / MQA
        _lowercase =n_heads_per_shard // num_key_value_heads
        _lowercase =dim // num_key_value_heads
    else: # compatibility with other checkpoints
        _lowercase =n_heads
        _lowercase =n_heads_per_shard
        _lowercase =dim
    # permute for sliced rotary
    def permute(__snake_case , __snake_case=n_heads , __snake_case=dim , __snake_case=dim ):
        return w.view(__snake_case , dima // n_heads // 2 , 2 , __snake_case ).transpose(1 , 2 ).reshape(__snake_case , __snake_case )
    print(F"Fetching all parameters from the checkpoint at {input_base_path}." )
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        _lowercase =torch.load(os.path.join(__snake_case , '''consolidated.00.pth''' ) , map_location='''cpu''' )
    else:
        # Sharded
        _lowercase =[
            torch.load(os.path.join(__snake_case , F"consolidated.{i:02d}.pth" ) , map_location='''cpu''' )
            for i in range(__snake_case )
        ]
    _lowercase =0
    _lowercase ={'''weight_map''': {}}
    # One output shard per transformer layer, plus a final shard for embeddings.
    for layer_i in range(__snake_case ):
        _lowercase =F"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            _lowercase ={
                F"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[F"layers.{layer_i}.attention.wq.weight"] ),
                F"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[F"layers.{layer_i}.attention.wk.weight"] ),
                F"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[F"layers.{layer_i}.attention.wv.weight"],
                F"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[F"layers.{layer_i}.attention.wo.weight"],
                F"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w1.weight"],
                F"model.layers.{layer_i}.mlp.down_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w2.weight"],
                F"model.layers.{layer_i}.mlp.up_proj.weight": loaded[F"layers.{layer_i}.feed_forward.w3.weight"],
                F"model.layers.{layer_i}.input_layernorm.weight": loaded[F"layers.{layer_i}.attention_norm.weight"],
                F"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[F"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            _lowercase ={
                F"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    F"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                F"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    F"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            _lowercase =permute(
                torch.cat(
                    [
                        loaded[i][F"layers.{layer_i}.attention.wq.weight"].view(__snake_case , __snake_case , __snake_case )
                        for i in range(__snake_case )
                    ] , dim=0 , ).reshape(__snake_case , __snake_case ) )
            _lowercase =permute(
                torch.cat(
                    [
                        loaded[i][F"layers.{layer_i}.attention.wk.weight"].view(
                            __snake_case , __snake_case , __snake_case )
                        for i in range(__snake_case )
                    ] , dim=0 , ).reshape(__snake_case , __snake_case ) , __snake_case , __snake_case , __snake_case , )
            _lowercase =torch.cat(
                [
                    loaded[i][F"layers.{layer_i}.attention.wv.weight"].view(
                        __snake_case , __snake_case , __snake_case )
                    for i in range(__snake_case )
                ] , dim=0 , ).reshape(__snake_case , __snake_case )
            _lowercase =torch.cat(
                [loaded[i][F"layers.{layer_i}.attention.wo.weight"] for i in range(__snake_case )] , dim=1 )
            _lowercase =torch.cat(
                [loaded[i][F"layers.{layer_i}.feed_forward.w1.weight"] for i in range(__snake_case )] , dim=0 )
            _lowercase =torch.cat(
                [loaded[i][F"layers.{layer_i}.feed_forward.w2.weight"] for i in range(__snake_case )] , dim=1 )
            _lowercase =torch.cat(
                [loaded[i][F"layers.{layer_i}.feed_forward.w3.weight"] for i in range(__snake_case )] , dim=0 )
        _lowercase =inv_freq
        for k, v in state_dict.items():
            _lowercase =filename
            param_count += v.numel()
        torch.save(__snake_case , os.path.join(__snake_case , __snake_case ) )
    _lowercase =F"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        _lowercase ={
            '''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''],
            '''model.norm.weight''': loaded['''norm.weight'''],
            '''lm_head.weight''': loaded['''output.weight'''],
        }
    else:
        _lowercase ={
            '''model.norm.weight''': loaded[0]['''norm.weight'''],
            '''model.embed_tokens.weight''': torch.cat(
                [loaded[i]['''tok_embeddings.weight'''] for i in range(__snake_case )] , dim=1 ),
            '''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(__snake_case )] , dim=0 ),
        }
    for k, v in state_dict.items():
        _lowercase =filename
        param_count += v.numel()
    torch.save(__snake_case , os.path.join(__snake_case , __snake_case ) )
    # Write configs
    _lowercase ={'''total_size''': param_count * 2}
    write_json(__snake_case , os.path.join(__snake_case , '''pytorch_model.bin.index.json''' ) )
    _lowercase =params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1
    _lowercase =params['''multiple_of'''] if '''multiple_of''' in params else 256
    _lowercase =LlamaConfig(
        hidden_size=__snake_case , intermediate_size=compute_intermediate_size(__snake_case , __snake_case , __snake_case ) , num_attention_heads=params['''n_heads'''] , num_hidden_layers=params['''n_layers'''] , rms_norm_eps=params['''norm_eps'''] , num_key_value_heads=__snake_case , )
    config.save_pretrained(__snake_case )
    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()
    print('''Loading the checkpoint in a Llama model.''' )
    _lowercase =LlamaForCausalLM.from_pretrained(__snake_case , torch_dtype=torch.floataa , low_cpu_mem_usage=__snake_case )
    # Avoid saving this as part of the config.
    del model.config._name_or_path
    print('''Saving in the Transformers format.''' )
    model.save_pretrained(__snake_case , safe_serialization=__snake_case )
    shutil.rmtree(__snake_case )
def UpperCAmelCase_(tokenizer_path, input_tokenizer_path):
    """Load the sentencepiece tokenizer model and save it in HF format.

    Restores distinct parameter names (duplicate-placeholder SyntaxError fix);
    the f-string below already read tokenizer_class/tokenizer_path.
    """
    # Prefer the fast tokenizer when the `tokenizers` library is available.
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(F"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)


# Restore the name used by main() below.
write_tokenizer = UpperCAmelCase_
def main() -> None:
    """CLI entry point: convert LLaMA weights (and tokenizer) to the HF format."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)


# Backward-compatible alias for the previous (mangled) public name.
UpperCAmelCase_ = main


if __name__ == "__main__":
    main()
| 5 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy-import table: maps each submodule to the public names it exports.
# Backend-specific entries are only registered when that backend is installed.
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 5 | 1 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase__ :
    """Builds small ViT-MSN configs and dummy inputs for the unit tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny random batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        """Build a ViTMSNConfig from the tester's hyper-parameters."""
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Run the base model and check the hidden-state output shape."""
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Run the classification head (RGB and greyscale) and check logits shape."""
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowerCamelCase__ (ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline tests for ViT-MSN (torch-only)."""

    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    # ViT-MSN does not support these common-test features.
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


# Backward-compatible alias for the previous (mangled) public name.
UpperCAmelCase_ = prepare_img
@require_torch
@require_vision
class lowerCamelCase__ (unittest.TestCase):
    """Slow end-to-end test for the pretrained small ViT-MSN classifier."""

    @cached_property
    def default_image_processor(self):
        # Only construct the processor when vision dependencies are installed.
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 5 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import table: maps each submodule to the public names it exports.
_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 5 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tensorflow_text_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import table: maps each submodule to the public names it exports.
# Backend-specific entries are only registered when that backend is installed.
_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bert"] = [
        "BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BertForMaskedLM",
        "BertForMultipleChoice",
        "BertForNextSentencePrediction",
        "BertForPreTraining",
        "BertForQuestionAnswering",
        "BertForSequenceClassification",
        "BertForTokenClassification",
        "BertLayer",
        "BertLMHeadModel",
        "BertModel",
        "BertPreTrainedModel",
        "load_tf_weights_in_bert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_bert"] = [
        "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBertEmbeddings",
        "TFBertForMaskedLM",
        "TFBertForMultipleChoice",
        "TFBertForNextSentencePrediction",
        "TFBertForPreTraining",
        "TFBertForQuestionAnswering",
        "TFBertForSequenceClassification",
        "TFBertForTokenClassification",
        "TFBertLMHeadModel",
        "TFBertMainLayer",
        "TFBertModel",
        "TFBertPreTrainedModel",
    ]

try:
    if not is_tensorflow_text_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_bert"] = [
        "FlaxBertForCausalLM",
        "FlaxBertForMaskedLM",
        "FlaxBertForMultipleChoice",
        "FlaxBertForNextSentencePrediction",
        "FlaxBertForPreTraining",
        "FlaxBertForQuestionAnswering",
        "FlaxBertForSequenceClassification",
        "FlaxBertForTokenClassification",
        "FlaxBertModel",
        "FlaxBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
    from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_fast import BertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bert import (
            BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BertForMaskedLM,
            BertForMultipleChoice,
            BertForNextSentencePrediction,
            BertForPreTraining,
            BertForQuestionAnswering,
            BertForSequenceClassification,
            BertForTokenClassification,
            BertLayer,
            BertLMHeadModel,
            BertModel,
            BertPreTrainedModel,
            load_tf_weights_in_bert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_bert import (
            TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBertEmbeddings,
            TFBertForMaskedLM,
            TFBertForMultipleChoice,
            TFBertForNextSentencePrediction,
            TFBertForPreTraining,
            TFBertForQuestionAnswering,
            TFBertForSequenceClassification,
            TFBertForTokenClassification,
            TFBertLMHeadModel,
            TFBertMainLayer,
            TFBertModel,
            TFBertPreTrainedModel,
        )

    try:
        if not is_tensorflow_text_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_tf import TFBertTokenizer

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_bert import (
            FlaxBertForCausalLM,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForNextSentencePrediction,
            FlaxBertForPreTraining,
            FlaxBertForQuestionAnswering,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertModel,
            FlaxBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 5 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import table: maps each submodule to the public names it exports.
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so torch only loads on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 5 | 1 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
# Marker types used in the HfArgumentParser signatures below.
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    """Parse a truthy/falsy CLI string into a bool (bools pass through).

    Raises:
        ArgumentTypeError: if ``v`` is not a recognized yes/no spelling.
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


# Backward-compatible alias for the previous (mangled) public name.
UpperCAmelCase_ = string_to_bool
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Return a converter mapping a string back to the matching choice.

    Unknown strings are returned unchanged so argparse can report them.
    """
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


# Backward-compatible alias for the previous (mangled) public name.
UpperCAmelCase_ = make_choice_type_function
def HfArg(
    *,
    aliases=None,
    help=None,
    default=dataclasses.MISSING,
    default_factory=dataclasses.MISSING,
    metadata=None,
    **kwargs,
) -> dataclasses.Field:
    """Build a ``dataclasses.field`` carrying parser metadata (aliases/help).

    Args:
        aliases: alternative CLI flag name(s) for this field.
        help: help text shown by ``--help``.
        default / default_factory: forwarded to ``dataclasses.field``.
        metadata: optional pre-existing metadata dict to extend.
    """
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)


# Backward-compatible alias for the previous (mangled) public name.
UpperCAmelCase_ = HfArg
class lowerCamelCase__ ( lowerCAmelCase):
    """Argument parser that derives CLI flags from dataclass fields.

    NOTE(review): identifiers in this block look machine-mangled (methods all
    named ``__A``, parameters all ``UpperCAmelCase``, locals ``_lowercase``);
    upstream this appears to be ``HfArgumentParser`` deriving from
    ``argparse.ArgumentParser`` — confirm against the original source before
    relying on runtime behavior.
    """

    # NOTE(review): presumably the `dataclass_types` declaration upstream; the
    # literal 42 looks like an obfuscation placeholder — confirm.
    SCREAMING_SNAKE_CASE__ = 42
    # Store the given dataclass type(s) and register one CLI argument per field.
    def __init__(self , UpperCAmelCase , **UpperCAmelCase ) -> List[str]:
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            _lowercase =ArgumentDefaultsHelpFormatter
        super().__init__(**UpperCAmelCase )
        if dataclasses.is_dataclass(UpperCAmelCase ):
            _lowercase =[dataclass_types]
        _lowercase =list(UpperCAmelCase )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(UpperCAmelCase )
    # Translate one dataclass field into the matching parser.add_argument call.
    @staticmethod
    def __A (UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
        _lowercase =f"--{field.name}"
        _lowercase =field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , UpperCAmelCase ):
            raise RuntimeError(
                '''Unresolved type detected, which should have been done with the help of '''
                '''`typing.get_type_hints` method by default''' )
        _lowercase =kwargs.pop('''aliases''' , [] )
        if isinstance(UpperCAmelCase , UpperCAmelCase ):
            _lowercase =[aliases]
        _lowercase =getattr(field.type , '''__origin__''' , field.type )
        # Unwrap Optional[X] / X | None down to the underlying concrete type.
        if origin_type is Union or (hasattr(UpperCAmelCase , '''UnionType''' ) and isinstance(UpperCAmelCase , types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(UpperCAmelCase ) not in field.type.__args__
            ):
                raise ValueError(
                    '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
                    ''' the argument parser only supports one type per argument.'''
                    f" Problem encountered in field '{field.name}'." )
            if type(UpperCAmelCase ) not in field.type.__args__:
                # filter `str` in Union
                _lowercase =field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                _lowercase =getattr(field.type , '''__origin__''' , field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                _lowercase =(
                    field.type.__args__[0] if isinstance(UpperCAmelCase , field.type.__args__[1] ) else field.type.__args__[1]
                )
                _lowercase =getattr(field.type , '''__origin__''' , field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        _lowercase ={}
        # Literal types and Enum subclasses become `choices=` arguments.
        if origin_type is Literal or (isinstance(field.type , UpperCAmelCase ) and issubclass(field.type , UpperCAmelCase )):
            if origin_type is Literal:
                _lowercase =field.type.__args__
            else:
                _lowercase =[x.value for x in field.type]
            _lowercase =make_choice_type_function(kwargs['''choices'''] )
            if field.default is not dataclasses.MISSING:
                _lowercase =field.default
            else:
                _lowercase =True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            _lowercase =copy(UpperCAmelCase )
            # Hack because type=bool in argparse does not behave as we want.
            _lowercase =string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                _lowercase =False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                _lowercase =default
                # This tells argparse we accept 0 or 1 value after --field_name
                _lowercase ='''?'''
                # This is the value that will get picked if we do --field_name (without value)
                _lowercase =True
        elif isclass(UpperCAmelCase ) and issubclass(UpperCAmelCase , UpperCAmelCase ):
            # List-typed fields accept one-or-more values on the command line.
            _lowercase =field.type.__args__[0]
            _lowercase ='''+'''
            if field.default_factory is not dataclasses.MISSING:
                _lowercase =field.default_factory()
            elif field.default is dataclasses.MISSING:
                _lowercase =True
        else:
            _lowercase =field.type
            if field.default is not dataclasses.MISSING:
                _lowercase =field.default
            elif field.default_factory is not dataclasses.MISSING:
                _lowercase =field.default_factory()
            else:
                _lowercase =True
        parser.add_argument(UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            _lowercase =False
            parser.add_argument(f"--no_{field.name}" , action='''store_false''' , dest=field.name , **UpperCAmelCase )
    # Resolve type hints for a dataclass and register each of its init fields.
    def __A (self , UpperCAmelCase ) -> Any:
        if hasattr(UpperCAmelCase , '''_argument_group_name''' ):
            _lowercase =self.add_argument_group(dtype._argument_group_name )
        else:
            _lowercase =self
        try:
            _lowercase =get_type_hints(UpperCAmelCase )
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                '''removing line of `from __future__ import annotations` which opts in Postponed '''
                '''Evaluation of Annotations (PEP 563)''' )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 1_0) and "unsupported operand type(s) for |" in str(UpperCAmelCase ):
                _lowercase ='''.'''.join(map(UpperCAmelCase , sys.version_info[:3] ) )
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    '''line of `from __future__ import annotations` which opts in union types as '''
                    '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
                    '''support Python versions that lower than 3.10, you need to use '''
                    '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
                    '''`X | None`.''' ) from ex
            raise
        for field in dataclasses.fields(UpperCAmelCase ):
            if not field.init:
                continue
            _lowercase =type_hints[field.name]
            self._parse_dataclass_field(UpperCAmelCase , UpperCAmelCase )
    # Parse sys.argv (plus optional .args files) into dataclass instances.
    def __A (self , UpperCAmelCase=None , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=None , ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            _lowercase =[]
            if args_filename:
                args_files.append(Path(UpperCAmelCase ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                _lowercase =ArgumentParser()
                args_file_parser.add_argument(UpperCAmelCase , type=UpperCAmelCase , action='''append''' )
                # Use only remaining args for further parsing (remove the args_file_flag)
                _lowercase , _lowercase =args_file_parser.parse_known_args(args=UpperCAmelCase )
                _lowercase =vars(UpperCAmelCase ).get(args_file_flag.lstrip('''-''' ) , UpperCAmelCase )
                if cmd_args_file_paths:
                    args_files.extend([Path(UpperCAmelCase ) for p in cmd_args_file_paths] )
            _lowercase =[]
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            _lowercase =file_args + args if args is not None else file_args + sys.argv[1:]
        _lowercase , _lowercase =self.parse_known_args(args=UpperCAmelCase )
        _lowercase =[]
        for dtype in self.dataclass_types:
            _lowercase ={f.name for f in dataclasses.fields(UpperCAmelCase ) if f.init}
            _lowercase ={k: v for k, v in vars(UpperCAmelCase ).items() if k in keys}
            for k in keys:
                delattr(UpperCAmelCase , UpperCAmelCase )
            _lowercase =dtype(**UpperCAmelCase )
            outputs.append(UpperCAmelCase )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(UpperCAmelCase )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}" )
            return (*outputs,)
    # Build dataclass instances from an already-parsed dict of arguments.
    def __A (self , UpperCAmelCase , UpperCAmelCase = False ) -> Tuple[DataClass, ...]:
        _lowercase =set(args.keys() )
        _lowercase =[]
        for dtype in self.dataclass_types:
            _lowercase ={f.name for f in dataclasses.fields(UpperCAmelCase ) if f.init}
            _lowercase ={k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            _lowercase =dtype(**UpperCAmelCase )
            outputs.append(UpperCAmelCase )
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(UpperCAmelCase )}" )
        return tuple(UpperCAmelCase )
    # Load a JSON file and delegate to parse_dict.
    def __A (self , UpperCAmelCase , UpperCAmelCase = False ) -> Tuple[DataClass, ...]:
        with open(Path(UpperCAmelCase ) , encoding='''utf-8''' ) as open_json_file:
            _lowercase =json.loads(open_json_file.read() )
        _lowercase =self.parse_dict(UpperCAmelCase , allow_extra_keys=UpperCAmelCase )
        return tuple(UpperCAmelCase )
    # Load a YAML file and delegate to parse_dict.
    def __A (self , UpperCAmelCase , UpperCAmelCase = False ) -> Tuple[DataClass, ...]:
        _lowercase =self.parse_dict(yaml.safe_load(Path(UpperCAmelCase ).read_text() ) , allow_extra_keys=UpperCAmelCase )
        return tuple(UpperCAmelCase )
| 5 |
def actual_power(a: int, b: int) -> int:
    """Compute ``a ** b`` for ``b >= 0`` by recursive squaring.

    Computing the half-power once and squaring it keeps the recursion at
    O(log b) multiplications instead of re-evaluating both branches.
    """
    if b == 0:
        return 1
    half = actual_power(a, int(b / 2))
    if (b % 2) == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    """Compute ``a ** b`` for any integer exponent.

    A negative exponent yields the reciprocal ``1 / a**|b|`` (float).
    """
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)


# Backward-compatible alias for the previous (mangled) public name.
UpperCAmelCase_ = power


if __name__ == "__main__":
    print(power(-2, -3))
| 5 | 1 |
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """Sort a list of integers in place with pigeonhole sort; return the list.

    Runs in O(n + range) time and O(range) extra space, where range is
    ``max(array) - min(array) + 1``.
    """
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array


# Backward-compatible alias for the previous (mangled) public name.
UpperCAmelCase_ = pigeon_sort


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
| 5 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowerCamelCase__ (nn.Module):
    """Pair of 2D transformers whose outputs are mixed for dual conditioning.

    Each of the two inner transformers attends over its own slice of the
    concatenated `encoder_hidden_states`; their residual outputs are blended
    with `mix_ratio` and added back to the input.
    """

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Run both transformers on their condition slices and blend the results."""
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            # Keep only the residual so the blend below re-adds the input once.
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return TransformeraDModelOutput(sample=output_states)
| 5 | 1 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
UpperCAmelCase__ = logging.get_logger(__name__)
class lowerCamelCase__ ( lowerCAmelCase):
    """Deprecated feature-extractor shim: warns, then defers to the base image processor."""

    def __init__(self , *args , **kwargs ) -> None:
        # NOTE(review): the warning category was an undefined (mangled) name in
        # the original; FutureWarning matches the transformers deprecation style.
        warnings.warn(
            '''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use DonutImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 5 |
import heapq as hq
import math
from collections.abc import Iterator
class lowerCamelCase__ :
    """A weighted-graph vertex with the bookkeeping fields Prim's algorithm needs.

    NOTE(review): the mangled original bound every attribute to a local
    ``_lowercase`` and named both mutators ``__A``; names are restored from the
    surviving right-hand sides and from the callers below
    (``add_neighbor``/``add_edge``, ``u.edges[v.id]``, ``v.key``, ``v.pi``).
    """

    def __init__(self , id_ ) -> None:
        self.id = str(id_ )
        self.key = None  # current best edge weight seen by Prim's
        self.pi = None  # predecessor vertex in the spanning tree
        self.neighbors = []
        self.edges = {}  # {vertex id: edge weight}

    def __lt__(self , other ) -> bool:
        # Ordering by key lets min()/heapq pick the cheapest frontier vertex.
        return self.key < other.key

    def __repr__(self ) -> str:
        return self.id

    def add_neighbor(self , vertex ) -> None:
        """Record an adjacent vertex."""
        self.neighbors.append(vertex )

    def add_edge(self , vertex , weight ) -> None:
        """Record the weight of the edge to ``vertex`` (keyed by its id)."""
        self.edges[vertex.id] = weight
def UpperCAmelCase_ ( graph , a , b , edge ) -> None:
    """Add an undirected weighted edge between the 1-indexed vertices ``a`` and ``b``.

    NOTE(review): the mangled original declared four identical ``__snake_case``
    parameters (a SyntaxError); ``graph``/``a``/``b`` survive in the body, the
    weight parameter is named ``edge`` by convention — confirm upstream.
    """
    graph[a - 1].add_neighbor(graph[b - 1] )
    graph[b - 1].add_neighbor(graph[a - 1] )
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1] , edge )
    graph[b - 1].add_edge(graph[a - 1] , edge )
def UpperCAmelCase_ ( graph , root ) -> list:
    """Prim's MST, list-based O(V^2) variant.

    Returns ``[(int(v.id) + 1, int(v.pi.id) + 1)]`` pairs for every non-root
    vertex, i.e. (vertex, parent) in the spanning tree.

    NOTE(review): reconstructed from a mangled original (duplicate
    ``__snake_case`` parameters, assignment targets collapsed to
    ``_lowercase``); targets are restored from the surviving reads
    (``q``, ``v.pi``, ``v.key``, ``a.append``).
    """
    a = []
    # initialise every vertex as unreached
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        # cheapest frontier vertex (Vertex.__lt__ compares keys)
        u = min(q )
        q.remove(u )
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1 , len(graph ) ):
        a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
    return a
def UpperCAmelCase_ ( graph , root ) -> Iterator[tuple]:
    """Prim's MST, heap-based variant; yields (vertex, parent) 1-based pairs.

    NOTE(review): reconstructed from a mangled original the same way as the
    list-based variant above; the heap ``h`` is re-heapified after each key
    decrease because heapq has no decrease-key operation.
    """
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph )
    hq.heapify(h )
    while h:
        u = hq.heappop(h )
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h )
    for i in range(1 , len(graph ) ):
        yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def UpperCAmelCase_ ( ) -> None:
    """Doctest container: upstream held the Prim's-algorithm usage examples here."""
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 5 | 1 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
UpperCAmelCase__ = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
class lowerCamelCase__ ( lowerCAmelCase):
    """Iterable dataset that tokenizes each HumanEval prompt once and yields it ``n_copies`` times.

    NOTE(review): the mangled original bound the constructor arguments to
    locals, so ``__iter__`` would fail; attribute names are restored from the
    reads in ``__iter__``. ``padding=True`` is presumed from upstream — confirm.
    """

    def __init__(self , tokenizer , dataset , n_tasks=None , n_copies=1 ) -> None:
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self ):
        prompts = []
        for task in range(self.n_tasks ):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
        outputs = self.tokenizer(prompts , padding=True , return_tensors='''pt''' )
        for task in range(self.n_tasks ):
            for _ in range(self.n_copies ):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class lowerCamelCase__ ( lowerCAmelCase):
    """Stopping criteria: halt generation once every sequence contains an end-of-function string.

    ``start_length`` marks where the prompt ends; only the generated suffix is
    inspected. NOTE(review): attribute names restored from the reads in
    ``__call__`` (the mangled original never set them).
    """

    def __init__(self , start_length , eof_strings , tokenizer ) -> None:
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self , input_ids , scores , **kwargs ):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        # True (stop) only when every sequence in the batch has finished
        return all(done )
def remove_last_block ( __snake_case ) -> str:
    """Truncate a generated completion at the first end-of-function marker.

    The split pattern is built from the module-level ``UpperCAmelCase__`` stop
    strings (upstream: ``EOF_STRINGS``). NOTE(review): the mangled original
    joined the *input string itself* into the pattern and left the def under a
    different name than its call site at the bottom of this module uses.
    """
    string_list = re.split('''(%s)''' % '''|'''.join(UpperCAmelCase__ ) , __snake_case )
    # last string should be ""
    return "".join(string_list[:-2] )
def complete_code ( accelerator , model , tokenizer , dataloader , n_tasks , batch_size=2_0 , **gen_kwargs ):
    """Generate ``batch_size`` completions per HumanEval task and truncate each at EOF.

    Returns ``code_gens``: one list of generated-code strings per task.

    NOTE(review): reconstructed from a mangled original (duplicate parameter
    names); the def name and the keyword parameters ``n_tasks``/``batch_size``
    are restored from the call site in ``main`` below — confirm upstream.
    """
    gen_token_dict = defaultdict(list )  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader ) ):
        with torch.no_grad():
            # NOTE(review): the mangled original discarded this value; upstream
            # assigns the prompt length to the stopping criteria's start_length.
            gen_kwargs["stopping_criteria"][0].start_length = batch['''ids'''].shape[-1]
            generated_tokens = accelerator.unwrap_model(model ).generate(
                input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=batch_size , **gen_kwargs )
            # each task is generated batch_size times
            generated_tasks = batch['''task_id'''].repeat(batch_size )
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens , dim=1 , pad_index=tokenizer.pad_token_id )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks) )
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks , generated_tokens ):
                gen_token_dict[task].append(generated_tokens )
    code_gens = [[] for _ in range(n_tasks )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s , skip_special_tokens=True , clean_up_tokenization_spaces=True )
            code_gens[task].append(remove_last_block(gen_code ) )
    return code_gens
def UpperCAmelCase_ ( ) -> str:
    """Run HumanEval generation and pass@k evaluation end to end.

    NOTE(review): assignment targets throughout this function are mangled to
    the placeholder ``_lowercase`` (and several call arguments to
    ``__snake_case``), so most computed values are discarded and later reads
    (``parser``, ``args``, ``tokenizer``, ``human_eval``, ...) would raise
    NameError. Some targets (e.g. the ``os.environ`` keys for
    HF_ALLOW_CODE_EVAL / TOKENIZERS_PARALLELISM) cannot be recovered from this
    file — restore from the upstream codeparrot human_eval script.
    """
    _lowercase =HfArgumentParser(__snake_case )
    _lowercase =parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    _lowercase =args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    _lowercase ='''false'''
    if args.num_workers is None:
        _lowercase =multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    _lowercase =Accelerator()
    set_seed(args.seed , device_specific=__snake_case )
    # Load model and tokenizer
    _lowercase =AutoTokenizer.from_pretrained(args.model_ckpt )
    _lowercase =tokenizer.eos_token
    _lowercase =AutoModelForCausalLM.from_pretrained(args.model_ckpt )
    # Generation settings
    _lowercase ={
        '''do_sample''': args.do_sample,
        '''temperature''': args.temperature,
        '''max_new_tokens''': args.max_new_tokens,
        '''top_p''': args.top_p,
        '''top_k''': args.top_k,
        '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , __snake_case , __snake_case )] ),
    }
    # Load evaluation dataset and metric
    _lowercase =load_dataset('''openai_humaneval''' )
    _lowercase =load_metric('''code_eval''' )
    _lowercase =args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
    _lowercase =args.n_samples // args.batch_size
    _lowercase =TokenizedDataset(__snake_case , human_eval['''test'''] , n_copies=__snake_case , n_tasks=__snake_case )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    _lowercase =DataLoader(__snake_case , batch_size=1 )
    # Run a quick test to see if code evaluation is enabled
    try:
        _lowercase =code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
    except ValueError as exception:
        print(
            '''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
            ''' flag to enable code evaluation.''' )
        raise exception
    _lowercase , _lowercase =accelerator.prepare(__snake_case , __snake_case )
    _lowercase =complete_code(
        __snake_case , __snake_case , __snake_case , __snake_case , n_tasks=__snake_case , batch_size=args.batch_size , **__snake_case , )
    if accelerator.is_main_process:
        _lowercase =[]
        for task in tqdm(range(__snake_case ) ):
            _lowercase =human_eval['''test'''][task]['''test''']
            _lowercase =F"check({human_eval['test'][task]['entry_point']})"
            references.append('''\n''' + test_func + '''\n''' + entry_point )
        # Evaluate completions with "code_eval" metric
        _lowercase , _lowercase =code_eval_metric.compute(
            references=__snake_case , predictions=__snake_case , num_workers=args.num_workers )
        print(F"Results: {pass_at_k}" )
        # Save results to json file
        with open(args.output_file , '''w''' ) as fp:
            json.dump(__snake_case , __snake_case )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
| 5 |
# flake8: noqa
# Lint as: python3
# Public names re-exported by this module (an ``__all__``-style listing).
# NOTE(review): the list's own name looks mangled — upstream binds ``__all__``.
UpperCAmelCase__ = [
    '''VerificationMode''',
    '''Version''',
    '''disable_progress_bar''',
    '''enable_progress_bar''',
    '''is_progress_bar_enabled''',
    '''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 5 | 1 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
UpperCAmelCase__ = 0b1_0_1_1_0_0_1_1_1_1_1_0_1_1_0_0_1_0_0_1_0_0_0_0_0_1_1_1_1_0_1_1_1_0_1_1_0_0_0_1_1_0_0_1_1_1_1_0
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
UpperCAmelCase__ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class lowerCamelCase__ :
    """Applies the invisible Stable-Diffusion watermark to batches of images.

    NOTE(review): the mangled original bound ``__init__`` results to locals,
    so ``self.encoder``/``self.watermark`` (read two lines later) were never
    set, and the image parameter of ``__A`` was unbound; names restored from
    the surviving reads.
    """

    def __init__(self ) -> None:
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark('''bits''' , self.watermark )

    def __A (self , images ):
        """Watermark a (batch, channels, H, W) tensor in [-1, 1]; returns same layout/range."""
        # can't encode images that are smaller than 256
        if images.shape[-1] < 2_5_6:
            return images
        # to uint8-style HWC numpy for the encoder
        images = (2_5_5 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        images = [self.encoder.encode(image , '''dwtDct''' ) for image in images]
        # back to CHW torch in [-1, 1]
        images = torch.from_numpy(np.array(images ) ).permute(0 , 3 , 1 , 2 )
        images = torch.clamp(2 * (images / 2_5_5 - 0.5) , min=-1.0 , max=1.0 )
        return images
| 5 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowerCamelCase__ ( lowerCAmelCase):
    """Configuration container for WavLM models.

    NOTE(review): every ``__init__`` parameter has been mangled to the
    placeholder ``UpperCAmelCase`` (duplicated — a SyntaxError) and every
    attribute-assignment target to ``_lowercase``; the intended attribute
    names survive on the right-hand sides below. Restore from upstream
    ``transformers`` before use.
    """

    # model_type key used by AutoConfig dispatch.
    SCREAMING_SNAKE_CASE__ = '''wavlm'''
    def __init__(self , UpperCAmelCase=3_2 , UpperCAmelCase=7_6_8 , UpperCAmelCase=1_2 , UpperCAmelCase=1_2 , UpperCAmelCase=3_0_7_2 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase="group" , UpperCAmelCase="gelu" , UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase=(1_0, 3, 3, 3, 3, 2, 2) , UpperCAmelCase=False , UpperCAmelCase=1_2_8 , UpperCAmelCase=1_6 , UpperCAmelCase=3_2_0 , UpperCAmelCase=8_0_0 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.05 , UpperCAmelCase=1_0 , UpperCAmelCase=2 , UpperCAmelCase=0.0 , UpperCAmelCase=1_0 , UpperCAmelCase=3_2_0 , UpperCAmelCase=2 , UpperCAmelCase=0.1 , UpperCAmelCase=1_0_0 , UpperCAmelCase=2_5_6 , UpperCAmelCase=2_5_6 , UpperCAmelCase=0.1 , UpperCAmelCase="mean" , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=2_5_6 , UpperCAmelCase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCAmelCase=(5, 3, 3, 1, 1) , UpperCAmelCase=(1, 2, 3, 1, 1) , UpperCAmelCase=5_1_2 , UpperCAmelCase=8_0 , UpperCAmelCase=0 , UpperCAmelCase=1 , UpperCAmelCase=2 , UpperCAmelCase=False , UpperCAmelCase=3 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=None , **UpperCAmelCase , ) -> Optional[Any]:
        super().__init__(**UpperCAmelCase , pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase )
        # transformer encoder hyper-parameters (targets mangled; names on RHS)
        _lowercase =hidden_size
        _lowercase =feat_extract_norm
        _lowercase =feat_extract_activation
        _lowercase =list(UpperCAmelCase )
        _lowercase =list(UpperCAmelCase )
        _lowercase =list(UpperCAmelCase )
        _lowercase =conv_bias
        _lowercase =num_buckets
        _lowercase =max_bucket_distance
        _lowercase =num_conv_pos_embeddings
        _lowercase =num_conv_pos_embedding_groups
        _lowercase =len(self.conv_dim )
        _lowercase =num_hidden_layers
        _lowercase =intermediate_size
        _lowercase =hidden_act
        _lowercase =num_attention_heads
        _lowercase =hidden_dropout
        _lowercase =attention_dropout
        _lowercase =activation_dropout
        _lowercase =feat_proj_dropout
        _lowercase =final_dropout
        _lowercase =layerdrop
        _lowercase =layer_norm_eps
        _lowercase =initializer_range
        _lowercase =num_ctc_classes
        _lowercase =vocab_size
        _lowercase =do_stable_layer_norm
        _lowercase =use_weighted_layer_sum
        _lowercase =classifier_proj_size
        # conv feature-extractor lists must all describe the same layer count
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        _lowercase =apply_spec_augment
        _lowercase =mask_time_prob
        _lowercase =mask_time_length
        _lowercase =mask_time_min_masks
        _lowercase =mask_feature_prob
        _lowercase =mask_feature_length
        # parameters for pretraining with codevector quantized representations
        _lowercase =num_codevectors_per_group
        _lowercase =num_codevector_groups
        _lowercase =contrastive_logits_temperature
        _lowercase =num_negatives
        _lowercase =codevector_dim
        _lowercase =proj_codevector_dim
        _lowercase =diversity_loss_weight
        # ctc loss
        _lowercase =ctc_loss_reduction
        _lowercase =ctc_zero_infinity
        # adapter
        _lowercase =add_adapter
        _lowercase =adapter_kernel_size
        _lowercase =adapter_stride
        _lowercase =num_adapter_layers
        _lowercase =output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        _lowercase =classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        _lowercase =list(UpperCAmelCase )
        _lowercase =list(UpperCAmelCase )
        _lowercase =list(UpperCAmelCase )
        _lowercase =xvector_output_dim
    # upstream: inputs_to_logits_ratio — total stride of the conv feature extractor.
    @property
    def __A (self ) -> int:
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 5 | 1 |
def solution ( n = 4000000 ) -> int:
    """Project Euler problem 2: sum of the even-valued Fibonacci terms not exceeding ``n``.

    NOTE(review): the mangled original named the parameter ``__snake_case``
    while the body read ``n`` (a NameError), and the def name is restored to
    ``solution`` because the __main__ guard below calls it by that name.
    """
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a, b = b, a + b
    return sum(even_fibs )
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 5 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase):
    """Slow Flax Stable Diffusion 2 inpainting integration test.

    NOTE(review): local-variable assignment targets are mangled to
    ``_lowercase`` and several call arguments to ``UpperCAmelCase``, so the
    intermediate results (pipeline, prompt, images, rng) are discarded and the
    later reads raise NameError; both methods are also named ``__A``
    (upstream: ``tearDown`` and the actual test). Restore from the upstream
    diffusers test before running.
    """

    def __A (self ) -> List[Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def __A (self ) -> Optional[Any]:
        # Reference init/mask images hosted on the Hub.
        _lowercase =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''' )
        _lowercase =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
        _lowercase ='''xvjiarui/stable-diffusion-2-inpainting'''
        _lowercase , _lowercase =FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCAmelCase , safety_checker=UpperCAmelCase )
        _lowercase ='''Face of a yellow cat, high resolution, sitting on a park bench'''
        _lowercase =jax.random.PRNGKey(0 )
        _lowercase =5_0
        _lowercase =jax.device_count()
        _lowercase =num_samples * [prompt]
        _lowercase =num_samples * [init_image]
        _lowercase =num_samples * [mask_image]
        _lowercase , _lowercase , _lowercase =pipeline.prepare_inputs(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
        # shard inputs and rng
        _lowercase =replicate(UpperCAmelCase )
        _lowercase =jax.random.split(UpperCAmelCase , jax.device_count() )
        _lowercase =shard(UpperCAmelCase )
        _lowercase =shard(UpperCAmelCase )
        _lowercase =shard(UpperCAmelCase )
        _lowercase =pipeline(
            UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase )
        _lowercase =output.images.reshape(UpperCAmelCase , 5_1_2 , 5_1_2 , 3 )
        _lowercase =images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        _lowercase =jnp.asarray(jax.device_get(image_slice.flatten() ) )
        # Golden slice from a reference run on TPU.
        _lowercase =jnp.array(
            [0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
        print(f"output_slice: {output_slice}" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 5 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = '''▁'''
UpperCAmelCase__ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
UpperCAmelCase__ = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
UpperCAmelCase__ = {
'''facebook/xglm-564M''': 2048,
}
class lowerCamelCase__ ( lowerCAmelCase):
    """SentencePiece tokenizer for XGLM with fairseq-aligned special tokens.

    NOTE(review): assignment targets are mangled to ``_lowercase`` (the
    intended ``self.*`` names survive where read back), the ``__init__``
    parameters all collapsed to ``UpperCAmelCase`` (a duplicate-argument
    SyntaxError), and every public method below is named ``__A`` so each
    shadows the previous one. Restore names from upstream ``transformers``
    before use; the upstream identity of each method is noted inline.
    """

    SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask''']
    def __init__(self , UpperCAmelCase , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase = None , **UpperCAmelCase , ) -> None:
        _lowercase ={} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        _lowercase =7
        _lowercase =[f"<madeupword{i}>" for i in range(self.num_madeup_words )]
        _lowercase =kwargs.get('''additional_special_tokens''' , [] )
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , )
        _lowercase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(UpperCAmelCase ) )
        _lowercase =vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        _lowercase =1
        # Mimic fairseq token-to-id alignment for the first 4 token
        _lowercase ={'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        _lowercase =len(self.sp_model )
        _lowercase ={f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
        self.fairseq_tokens_to_ids.update(UpperCAmelCase )
        _lowercase ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
    # Pickling support: the C++ SentencePiece processor is not picklable, so
    # it is dropped and re-serialized (upstream stores it in state["sp_model"]).
    def __getstate__(self ) -> List[str]:
        _lowercase =self.__dict__.copy()
        _lowercase =None
        _lowercase =self.sp_model.serialized_model_proto()
        return state
    def __setstate__(self , UpperCAmelCase ) -> Any:
        _lowercase =d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            _lowercase ={}
        _lowercase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    # upstream: build_inputs_with_special_tokens — TODO confirm.
    def __A (self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
        if token_ids_a is None:
            return [self.sep_token_id] + token_ids_a
        _lowercase =[self.sep_token_id]
        return sep + token_ids_a + sep + sep + token_ids_a
    # upstream: get_special_tokens_mask — TODO confirm.
    def __A (self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(UpperCAmelCase ))
        return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase ))
    # upstream: create_token_type_ids_from_sequences — TODO confirm.
    def __A (self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
        _lowercase =[self.sep_token_id]
        if token_ids_a is None:
            return len(sep + token_ids_a ) * [0]
        return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
    # upstream: vocab_size property.
    @property
    def __A (self ) -> int:
        return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
    # upstream: get_vocab.
    def __A (self ) -> Union[str, Any]:
        _lowercase ={self.convert_ids_to_tokens(UpperCAmelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    # upstream: _tokenize.
    def __A (self , UpperCAmelCase ) -> List[str]:
        return self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase )
    # upstream: _convert_token_to_id.
    def __A (self , UpperCAmelCase ) -> List[str]:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        _lowercase =self.sp_model.PieceToId(UpperCAmelCase )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    # upstream: _convert_id_to_token.
    def __A (self , UpperCAmelCase ) -> Tuple:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    # upstream: convert_tokens_to_string (joins pieces, swaps "▁" for spaces).
    def __A (self , UpperCAmelCase ) -> Dict:
        _lowercase =''''''.join(UpperCAmelCase ).replace(UpperCAmelCase , ''' ''' ).strip()
        return out_string
    # upstream: save_vocabulary.
    def __A (self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
        if not os.path.isdir(UpperCAmelCase ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        _lowercase =os.path.join(
            UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , UpperCAmelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(UpperCAmelCase , '''wb''' ) as fi:
                _lowercase =self.sp_model.serialized_model_proto()
                fi.write(UpperCAmelCase )
        return (out_vocab_file,)
| 5 |
import comet # From: unbabel-comet
import torch
import datasets
UpperCAmelCase__ = datasets.logging.get_logger(__name__)
UpperCAmelCase__ = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
UpperCAmelCase__ = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
UpperCAmelCase__ = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowerCamelCase__ ( datasets.Metric):
    """COMET machine-translation quality metric (wraps unbabel-comet).

    NOTE(review): the mangled original named all three hook methods ``__A``
    (each shadowing the previous) and never bound ``self.scorer``; the hook
    names are restored from the ``datasets.Metric`` API.
    """

    def _info(self ):
        # Feature schema plus citation/homepage metadata for this metric.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''sources''': datasets.Value('''string''' , id='''sequence''' ),
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
                '''https://github.com/Unbabel/COMET''',
                '''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
                '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
            ] , )

    def _download_and_prepare(self , dl_manager ) -> None:
        # Load the COMET checkpoint once; the default config maps to wmt20-comet-da.
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )

    def _compute(self , sources , predictions , references , gpus=None , progress_bar=False ) -> dict:
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'''src''': sources, '''mt''': predictions, '''ref''': references}
        data = [dict(zip(data , t ) ) for t in zip(*data.values() )]
        # NOTE(review): unpack order (scores, mean_score) presumed from upstream — confirm.
        scores, mean_score = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar )
        return {"mean_score": mean_score, "scores": scores}
| 5 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Import map for the lazy module: public submodules -> exported names.
# NOTE(review): the assignments to ``UpperCAmelCase__`` here look mangled —
# upstream binds/extends ``_import_structure``, which is read at the bottom.
UpperCAmelCase__ = {
    '''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
    '''processing_git''': ['''GitProcessor'''],
}
# Modeling code needs torch; register it only when torch is importable.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase__ = [
        '''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''GitForCausalLM''',
        '''GitModel''',
        '''GitPreTrainedModel''',
        '''GitVisionModel''',
    ]
# Static type checkers see the real imports; at runtime a _LazyModule defers them.
if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )
else:
    import sys
    UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowerCamelCase__ ( SchedulerMixin , ConfigMixin):
    """Variance-preserving SDE scheduler (score_sde_vp).

    NOTE(review): the mangled original listed the same base twice (an MRO
    error); SchedulerMixin/ConfigMixin are the two mixins imported at the top
    of this module. The two shadowed ``__A`` methods are renamed:
    ``set_timesteps`` is grounded by this class's own error message, and the
    predictor step is named ``step_pred`` per the upstream scheduler API —
    confirm before relying on the names.
    """

    # Solver order used by pipelines when chaining schedulers.
    SCREAMING_SNAKE_CASE__ = 1

    @register_to_config
    def __init__(self , num_train_timesteps=2_0_0_0 , beta_min=0.1 , beta_max=2_0 , sampling_eps=1e-3 ) -> None:
        # Populated later by set_timesteps(); sigma buffers kept for API parity.
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self , num_inference_steps , device=None ) -> None:
        """Create the descending continuous timestep grid on ``device``."""
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )

    def step_pred(self , score , x , t , generator=None ):
        """One Euler–Maruyama predictor step of the reverse VP-SDE.

        Returns ``(x, x_mean)``: the noisy sample and its noise-free mean.
        """
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean

    def __len__(self ) -> int:
        return self.config.num_train_timesteps
| 5 | 1 |
import os
def UpperCAmelCase_ ( __snake_case = "input.txt" ) -> int:
"""simple docstring"""
with open(os.path.join(os.path.dirname(__snake_case ) , __snake_case ) ) as input_file:
_lowercase =[
[int(__snake_case ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
_lowercase =len(__snake_case )
_lowercase =len(matrix[0] )
_lowercase =[[-1 for _ in range(__snake_case )] for _ in range(__snake_case )]
for i in range(__snake_case ):
_lowercase =matrix[i][0]
for j in range(1 , __snake_case ):
for i in range(__snake_case ):
_lowercase =minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __snake_case ):
_lowercase =min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
_lowercase =min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 5 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def UpperCAmelCase_ ( __snake_case ) -> Optional[Any]:
    """Build a MobileViTConfig for the named checkpoint variant.

    NOTE(review): every configuration write below was collapsed to a dead
    local ``_lowercase`` instead of an attribute on ``config`` (upstream
    presumably ``config.hidden_sizes`` / ``config.neck_hidden_sizes`` etc. —
    confirm against the upstream conversion script), so the function returns
    a default config. The id2label block also reads the undefined name
    ``idalabel`` and calls ``int()`` on the model name.
    """
    _lowercase =MobileViTConfig()
    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        _lowercase =[144, 192, 240]
        _lowercase =[16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        _lowercase =[96, 120, 144]
        _lowercase =[16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        _lowercase =[64, 80, 96]
        _lowercase =[16, 16, 24, 48, 64, 80, 320]
        _lowercase =0.05
        _lowercase =2.0
    if mobilevit_name.startswith('''deeplabv3_''' ):
        # Segmentation variants: 512 output stride settings + PASCAL VOC labels.
        _lowercase =512
        _lowercase =16
        _lowercase =21
        _lowercase ='''pascal-voc-id2label.json'''
    else:
        # Classification variants: ImageNet-1k label space.
        _lowercase =1000
        _lowercase ='''imagenet-1k-id2label.json'''
    _lowercase ='''huggingface/label-files'''
    # Downloads the label mapping from the Hub dataset repo.
    _lowercase =json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type='''dataset''' ) , '''r''' ) )
    _lowercase ={int(__snake_case ): v for k, v in idalabel.items()}
    _lowercase =idalabel
    _lowercase ={v: k for k, v in idalabel.items()}
    return config
def UpperCAmelCase_ ( name , base_model=False ) -> str:
    """Translate an original MobileViT state-dict key to the HF naming scheme.

    Fixes over the original:
    * the signature repeated ``__snake_case`` twice, a SyntaxError — the two
      parameters are the key (``name``) and the ``base_model`` flag;
    * every ``name.replace(...)`` result was bound to a throwaway local, so
      no rename ever took effect and the input key was returned unchanged;
    * the return annotation said ``Tuple`` although a ``str`` is returned.

    Args:
        name: original checkpoint key.
        base_model: when True, omit the ``mobilevit.`` prefix for keys that
            belong to the bare backbone.
    """
    for i in range(1 , 6 ):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}." , f"encoder.layer.{i - 1}." )
    if "conv_1." in name:
        name = name.replace('''conv_1.''' , '''conv_stem.''' )
    if ".block." in name:
        name = name.replace('''.block.''' , '''.''' )
    if "exp_1x1" in name:
        name = name.replace('''exp_1x1''' , '''expand_1x1''' )
    if "red_1x1" in name:
        name = name.replace('''red_1x1''' , '''reduce_1x1''' )
    if ".local_rep.conv_3x3." in name:
        name = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' )
    if ".local_rep.conv_1x1." in name:
        name = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' )
    if ".norm." in name:
        name = name.replace('''.norm.''' , '''.normalization.''' )
    if ".conv." in name:
        name = name.replace('''.conv.''' , '''.convolution.''' )
    if ".conv_proj." in name:
        name = name.replace('''.conv_proj.''' , '''.conv_projection.''' )
    # Stages 0-1 wrap their sub-blocks in an extra ``layer`` container ...
    for i in range(0 , 2 ):
        for j in range(0 , 4 ):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}." , f".{i}.layer.{j}." )
    # ... stages 2-5 flatten the inner index away.
    for i in range(2 , 6 ):
        for j in range(0 , 4 ):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}." , f".{i}." )
    if "expand_1x1" in name:
        name = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' )
    if "conv_3x3" in name:
        name = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' )
    if "reduce_1x1" in name:
        name = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' )
    # global_rep.{2..4} hold the final layernorm of each transformer stack.
    for i in range(2 , 5 ):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight" , '''.layernorm.weight''' )
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias" , '''.layernorm.bias''' )
    if ".global_rep." in name:
        name = name.replace('''.global_rep.''' , '''.transformer.''' )
    if ".pre_norm_mha.0." in name:
        name = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' )
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' )
    if ".pre_norm_ffn.0." in name:
        name = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' )
    if ".pre_norm_ffn.1." in name:
        name = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' )
    if ".pre_norm_ffn.4." in name:
        name = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' )
    if ".transformer." in name:
        name = name.replace('''.transformer.''' , '''.transformer.layer.''' )
    if ".aspp_layer." in name:
        name = name.replace('''.aspp_layer.''' , '''.''' )
    if ".aspp_pool." in name:
        name = name.replace('''.aspp_pool.''' , '''.''' )
    if "seg_head." in name:
        name = name.replace('''seg_head.''' , '''segmentation_head.''' )
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' )
    if "classifier.fc." in name:
        name = name.replace('''classifier.fc.''' , '''classifier.''' )
    elif (not base_model) and ("segmentation_head." not in name):
        # Everything except heads lives under the ``mobilevit`` backbone.
        name = '''mobilevit.''' + name
    return name
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case=False ) -> Optional[Any]:
    """Rewrite an original state dict into the HF key layout, splitting fused
    QKV projections into separate query/key/value tensors.

    NOTE(review): the signature repeats ``__snake_case`` three times, which
    is a SyntaxError (upstream: orig_state_dict, model, base_model), and the
    body writes every intermediate (key, val, key_split, layer_num, dim, ...)
    to a dead local ``_lowercase`` while reading the upstream names, so the
    renamed/split entries are never actually stored back.
    """
    if base_model:
        _lowercase =''''''
    else:
        _lowercase ='''mobilevit.'''
    for key in orig_state_dict.copy().keys():
        _lowercase =orig_state_dict.pop(__snake_case )
        if key[:8] == "encoder.":
            _lowercase =key[8:]
        if "qkv" in key:
            # Fused qkv weight: slice into query / key / value of size ``dim``.
            _lowercase =key.split('''.''' )
            _lowercase =int(key_split[0][6:] ) - 1
            _lowercase =int(key_split[3] )
            _lowercase =model.get_submodule(F"{model_prefix}encoder.layer.{layer_num}" )
            _lowercase =layer.transformer.layer[transformer_num].attention.attention.all_head_size
            _lowercase =(
                F"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                _lowercase =val[:dim, :]
                _lowercase =val[dim : dim * 2, :]
                _lowercase =val[-dim:, :]
            else:
                _lowercase =val[:dim]
                _lowercase =val[dim : dim * 2]
                _lowercase =val[-dim:]
        else:
            _lowercase =val
    return orig_state_dict
def UpperCAmelCase_ ( ) -> Union[str, Any]:
    """Download the standard COCO cats image used to smoke-test conversions.

    Fix: the original bound both the URL and the decoded image to a dead
    local and then evaluated the undefined names ``__snake_case`` and ``im``.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    # stream=True so PIL can read straight from the response body.
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case=False ) -> int:
    """Convert an original MobileViT checkpoint, verify its logits against
    hard-coded expected values, save it, and optionally push to the Hub.

    NOTE(review): the signature repeats ``__snake_case`` four times, which is
    a SyntaxError (upstream: mobilevit_name, checkpoint_path,
    pytorch_dump_folder_path, push_to_hub), and the body stores config /
    checkpoint / model / outputs into a dead local ``_lowercase`` while
    reading the upstream names, so it raises NameError at the first read.
    """
    _lowercase =get_mobilevit_config(__snake_case )
    # load original state_dict
    _lowercase =torch.load(__snake_case , map_location='''cpu''' )
    # load 🤗 model
    if mobilevit_name.startswith('''deeplabv3_''' ):
        _lowercase =MobileViTForSemanticSegmentation(__snake_case ).eval()
    else:
        _lowercase =MobileViTForImageClassification(__snake_case ).eval()
    _lowercase =convert_state_dict(__snake_case , __snake_case )
    model.load_state_dict(__snake_case )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    _lowercase =MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    _lowercase =image_processor(images=prepare_img() , return_tensors='''pt''' )
    _lowercase =model(**__snake_case )
    _lowercase =outputs.logits
    # Golden logits below were recorded from the original checkpoints.
    if mobilevit_name.startswith('''deeplabv3_''' ):
        assert logits.shape == (1, 21, 32, 32)
        if mobilevit_name == "deeplabv3_mobilevit_s":
            _lowercase =torch.tensor(
                [
                    [[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
                    [[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
                    [[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            _lowercase =torch.tensor(
                [
                    [[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
                    [[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
                    [[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            _lowercase =torch.tensor(
                [
                    [[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
                    [[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
                    [[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
                ] )
        else:
            raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
        assert torch.allclose(logits[0, :3, :3, :3] , __snake_case , atol=1e-4 )
    else:
        assert logits.shape == (1, 1000)
        if mobilevit_name == "mobilevit_s":
            _lowercase =torch.tensor([-0.98_66, 0.23_92, -1.12_41] )
        elif mobilevit_name == "mobilevit_xs":
            _lowercase =torch.tensor([-2.47_61, -0.93_99, -1.95_87] )
        elif mobilevit_name == "mobilevit_xxs":
            _lowercase =torch.tensor([-1.93_64, -1.23_27, -0.46_53] )
        else:
            raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
        assert torch.allclose(logits[0, :3] , __snake_case , atol=1e-4 )
    Path(__snake_case ).mkdir(exist_ok=__snake_case )
    print(F"Saving model {mobilevit_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(__snake_case )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(__snake_case )
    if push_to_hub:
        # Map local checkpoint names to the official Hub repo names.
        _lowercase ={
            '''mobilevit_s''': '''mobilevit-small''',
            '''mobilevit_xs''': '''mobilevit-x-small''',
            '''mobilevit_xxs''': '''mobilevit-xx-small''',
            '''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
            '''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
            '''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
        }
        print('''Pushing to the hub...''' )
        _lowercase =model_mapping[mobilevit_name]
        image_processor.push_to_hub(__snake_case , organization='''apple''' )
        model.push_to_hub(__snake_case , organization='''apple''' )
# CLI entry point: parse the checkpoint name/paths and run the conversion.
if __name__ == "__main__":
    UpperCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--mobilevit_name''',
        default='''mobilevit_s''',
        type=str,
        help=(
            '''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
            ''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
        ),
    )
    parser.add_argument(
        '''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )
    # NOTE(review): the parser is bound to ``UpperCAmelCase__`` above but the
    # add_argument calls read the undefined name ``parser`` — NameError as written.
    UpperCAmelCase__ = parser.parse_args()
    convert_movilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 5 | 1 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCAmelCase__ = logging.get_logger(__name__)
class lowerCamelCase__ ( lowerCAmelCase):
    """Composite configuration holding an image-encoder config and a
    text-decoder config (vision-encoder-decoder).

    Fixes over the original:
    * ``__init__``'s kwargs were bound to ``**UpperCAmelCase`` while the body
      read ``kwargs``; the popped sub-configs were written to a dead local so
      ``self.encoder`` / ``self.decoder`` were never set;
    * the classmethod repeated the parameter name ``UpperCAmelCase`` — a
      SyntaxError — and never actually set the decoder flags its log
      message announces.

    NOTE(review): both class attributes below were obfuscated to the same
    name (the second assignment wins; upstream these are ``model_type`` and
    ``is_composition``), and both methods are named ``__A`` so the instance
    method shadows the classmethod — left unchanged to preserve the
    (broken) external interface.
    """
    SCREAMING_SNAKE_CASE__ = '''vision-encoder-decoder'''
    SCREAMING_SNAKE_CASE__ = True
    def __init__(self , **kwargs ) -> None:
        super().__init__(**kwargs )
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuraton of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}" )
        encoder_config = kwargs.pop('''encoder''' )
        encoder_model_type = encoder_config.pop('''model_type''' )
        decoder_config = kwargs.pop('''decoder''' )
        decoder_model_type = decoder_config.pop('''model_type''' )
        # Re-hydrate the nested dicts into concrete config objects.
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
    @classmethod
    def __A (cls , encoder_config , decoder_config , **kwargs ) -> PretrainedConfig:
        """Build a composite config from two standalone configs, forcing the
        decoder into cross-attention decoder mode."""
        logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def __A (self ) -> Optional[Any]:
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__ )
        output['''encoder'''] = self.encoder.to_dict()
        output['''decoder'''] = self.decoder.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
class lowerCamelCase__ ( lowerCAmelCase):
    """ONNX config for the vision encoder half.

    NOTE(review): all three properties are named ``__A``, so only the last
    (the output mapping) is reachable; upstream these are presumably
    ``inputs``, ``atol_for_validation`` and ``outputs`` — confirm against
    the upstream VisionEncoderDecoder ONNX config.
    """
    # Minimum ONNX opset/torch version this export was validated against.
    SCREAMING_SNAKE_CASE__ = version.parse('''1.11''')
    @property
    def __A (self ) -> Mapping[str, Mapping[int, str]]:
        # Dynamic-axis mapping for the pixel input.
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )
    @property
    def __A (self ) -> float:
        # Absolute tolerance when validating exported outputs.
        return 1e-4
    @property
    def __A (self ) -> Mapping[str, Mapping[int, str]]:
        # Dynamic-axis mapping for the encoder output.
        return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} )
class lowerCamelCase__ ( lowerCAmelCase):
    """ONNX config for the text decoder half.

    NOTE(review): the first property builds its dict in a dead local
    ``_lowercase`` and returns the undefined name ``common_inputs``
    (NameError), and the second method repeats the parameter name
    ``UpperCAmelCase`` five times — a SyntaxError, so this class cannot be
    compiled as written (upstream signature: tokenizer, batch_size,
    seq_length, is_pair, framework).
    """
    @property
    def __A (self ) -> Mapping[str, Mapping[int, str]]:
        _lowercase =OrderedDict()
        _lowercase ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        _lowercase ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        _lowercase ={0: '''batch''', 1: '''encoder_sequence'''}
        return common_inputs
    def __A (self , UpperCAmelCase , UpperCAmelCase = -1 , UpperCAmelCase = -1 , UpperCAmelCase = False , UpperCAmelCase = None , ) -> Mapping[str, Any]:
        # Builds dummy decoder inputs plus a zero encoder_hidden_states tensor
        # shaped (batch, encoder_sequence, encoder_hidden_size).
        import torch
        _lowercase =OrderedDict()
        _lowercase =super().generate_dummy_inputs(
            UpperCAmelCase , batch_size=UpperCAmelCase , seq_length=UpperCAmelCase , is_pair=UpperCAmelCase , framework=UpperCAmelCase )
        _lowercase , _lowercase =dummy_input['''input_ids'''].shape
        _lowercase =(batch, encoder_sequence, self._config.encoder_hidden_size)
        _lowercase =dummy_input.pop('''input_ids''' )
        _lowercase =dummy_input.pop('''attention_mask''' )
        _lowercase =torch.zeros(UpperCAmelCase )
        return common_inputs
class lowerCamelCase__ ( lowerCAmelCase):
    """Top-level ONNX config dispatching to the encoder/decoder sub-configs.

    NOTE(review): all three methods share the name ``__A`` (last wins), the
    factory methods reference the class names
    ``VisionEncoderDecoderEncoderOnnxConfig`` /
    ``VisionEncoderDecoderDecoderOnnxConfig`` that no longer exist in this
    obfuscated module, and the last signature repeats ``UpperCAmelCase`` —
    a SyntaxError.
    """
    @property
    def __A (self ) -> None:
        pass
    def __A (self , UpperCAmelCase ) -> OnnxConfig:
        # Factory for the encoder-side ONNX config.
        return VisionEncoderDecoderEncoderOnnxConfig(UpperCAmelCase )
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = "default" ) -> OnnxConfig:
        # Factory for the decoder-side ONNX config (feature task defaults).
        _lowercase =encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(UpperCAmelCase , UpperCAmelCase )
| 5 |
import requests
from bsa import BeautifulSoup
def UpperCAmelCase_ ( __snake_case = "https://www.worldometers.info/coronavirus" ) -> dict:
    """Scrape worldometers and map each headline / panel title to its counter.

    Fix: the original bound the soup and the first ``findAll`` results to a
    dead local, then augmented and zipped the undefined names ``keys`` and
    ``values`` (NameError).
    """
    soup = BeautifulSoup(requests.get(__snake_case ).text , '''html.parser''' )
    keys = soup.findAll('''h1''' )
    values = soup.findAll('''div''' , {'''class''': '''maincounter-number'''} )
    keys += soup.findAll('''span''' , {'''class''': '''panel-title'''} )
    values += soup.findAll('''div''' , {'''class''': '''number-table-main'''} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}


if __name__ == "__main__":
    print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
    # Fix: the guard called the undefined name ``world_covidaa_stats``.
    for key, value in UpperCAmelCase_().items():
        print(f'''{key}\n{value}\n''')
| 5 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class lowerCamelCase__ ( lowerCAmelCase):
    """Configuration for Data2Vec-Vision models (+ segmentation heads).

    Fixes over the original:
    * ``__init__`` repeated the parameter name ``UpperCAmelCase`` 27 times —
      a SyntaxError; the canonical parameter names are recovered from the
      attribute reads in the body and paired with the original defaults in
      order;
    * every attribute write was collapsed to a dead local ``_lowercase``,
      so no configuration value was ever stored on the instance.

    NOTE(review): the class attribute below was presumably ``model_type``
    upstream; kept under its obfuscated name. The list defaults mirror the
    original signature.
    """
    SCREAMING_SNAKE_CASE__ = '''data2vec-vision'''
    def __init__(
        self,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=2_2_4,
        patch_size=1_6,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 1_1],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=2_5_6,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=2_5_5,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class lowerCamelCase__ ( lowerCAmelCase):
    """ONNX export config for Data2Vec-Vision.

    NOTE(review): both properties are named ``__A`` — the validation
    tolerance shadows the input mapping; upstream these are presumably
    ``inputs`` and ``atol_for_validation``.
    """
    # Minimum torch/opset version this export path was validated against.
    SCREAMING_SNAKE_CASE__ = version.parse('''1.11''')
    @property
    def __A (self ) -> Mapping[str, Mapping[int, str]]:
        # Dynamic-axis mapping for the pixel input.
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )
    @property
    def __A (self ) -> float:
        # Absolute tolerance when comparing exported vs. eager outputs.
        return 1e-4
| 5 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
# Map of submodule name -> public names it exports; consumed by _LazyModule
# so that heavy submodules are only imported on first attribute access.
UpperCAmelCase__ = {
    '''config''': [
        '''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
        '''OnnxConfig''',
        '''OnnxConfigWithPast''',
        '''OnnxSeq2SeqConfigWithPast''',
        '''PatchingSpec''',
    ],
    '''convert''': ['''export''', '''validate_model_outputs'''],
    '''features''': ['''FeaturesManager'''],
    '''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
# Static type checkers see the real imports; at runtime the lazy module is used.
if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeqaSeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys
    # NOTE(review): _LazyModule expects the import-structure dict; here it is
    # passed via the obfuscated global ``_import_structure`` which is not
    # defined in this module (the dict above is bound to ``UpperCAmelCase__``).
    UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 | 1 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCAmelCase__ = 16
UpperCAmelCase__ = 32
def UpperCAmelCase_ ( __snake_case , __snake_case = 16 ) -> Any:
    """Build the GLUE/MRPC train and eval dataloaders (accelerate example).

    NOTE(review): the signature repeats ``__snake_case`` (SyntaxError;
    upstream: accelerator, batch_size), tokenizer/datasets/tokenized_datasets
    are written to a dead local ``_lowercase`` but read under their upstream
    names, and several literal arguments (truncation flag, shuffle flags,
    eval batch size) were obfuscated to ``__snake_case``.
    """
    _lowercase =AutoTokenizer.from_pretrained('''bert-base-cased''' )
    _lowercase =load_dataset('''glue''' , '''mrpc''' )
    def tokenize_function(__snake_case ):
        # max_length=None => use the model max length (it's actually the default)
        _lowercase =tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__snake_case , max_length=__snake_case )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        _lowercase =datasets.map(
            __snake_case , batched=__snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    _lowercase =tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(__snake_case ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        _lowercase =128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            _lowercase =16
        elif accelerator.mixed_precision != "no":
            _lowercase =8
        else:
            _lowercase =None
        return tokenizer.pad(
            __snake_case , padding='''longest''' , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors='''pt''' , )
    # Instantiate dataloaders.
    _lowercase =DataLoader(
        tokenized_datasets['''train'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case , drop_last=__snake_case )
    _lowercase =DataLoader(
        tokenized_datasets['''validation'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case , drop_last=(accelerator.mixed_precision == '''fp8''') , )
    return train_dataloader, eval_dataloader
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> List[str]:
    """Train and evaluate BERT on MRPC under an ``Accelerator``.

    NOTE(review): the signature repeats ``__snake_case`` (SyntaxError;
    upstream: config, args) and nearly every local (accelerator, lr, model,
    optimizer, dataloaders, metric, ...) is written to a dead local
    ``_lowercase`` while being read under the upstream name — NameError on
    first use as written.
    """
    _lowercase =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    _lowercase =config['''lr''']
    _lowercase =int(config['''num_epochs'''] )
    _lowercase =int(config['''seed'''] )
    _lowercase =int(config['''batch_size'''] )
    _lowercase =evaluate.load('''glue''' , '''mrpc''' )
    # If the batch size is too big we use gradient accumulation
    _lowercase =1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        _lowercase =batch_size // MAX_GPU_BATCH_SIZE
        _lowercase =MAX_GPU_BATCH_SIZE
    set_seed(__snake_case )
    _lowercase , _lowercase =get_dataloaders(__snake_case , __snake_case )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    _lowercase =AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__snake_case )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    _lowercase =model.to(accelerator.device )
    # Instantiate optimizer
    _lowercase =AdamW(params=model.parameters() , lr=__snake_case )
    # Instantiate scheduler
    _lowercase =get_linear_schedule_with_warmup(
        optimizer=__snake_case , num_warmup_steps=100 , num_training_steps=(len(__snake_case ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    _lowercase , _lowercase , _lowercase , _lowercase , _lowercase =accelerator.prepare(
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
    # Now we train the model
    for epoch in range(__snake_case ):
        model.train()
        for step, batch in enumerate(__snake_case ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            _lowercase =model(**__snake_case )
            _lowercase =outputs.loss
            _lowercase =loss / gradient_accumulation_steps
            accelerator.backward(__snake_case )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(__snake_case ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                _lowercase =model(**__snake_case )
            _lowercase =outputs.logits.argmax(dim=-1 )
            _lowercase , _lowercase =accelerator.gather_for_metrics((predictions, batch['''labels''']) )
            metric.add_batch(
                predictions=__snake_case , references=__snake_case , )
        _lowercase =metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"epoch {epoch}:" , __snake_case )
def UpperCAmelCase_ ( ) -> List[str]:
    """CLI entry point: parse args and launch the training loop.

    NOTE(review): the parser is bound to a dead local ``_lowercase`` but
    read as ``parser``/``args``, and the final call passes the undefined
    name ``__snake_case`` (upstream: ``training_function(config, args)``).
    """
    _lowercase =argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=__snake_case , default=__snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    _lowercase =parser.parse_args()
    _lowercase ={'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(__snake_case , __snake_case )
# NOTE(review): the guard calls ``main`` although the entry point above is
# named ``UpperCAmelCase_`` — NameError as written.
if __name__ == "__main__":
    main()
| 5 |
def UpperCAmelCase_ ( __snake_case ) -> str:
    """Return the longest palindromic substring of ``__snake_case``.

    Manacher's algorithm on the '|'-interleaved string, so even- and
    odd-length palindromes are handled uniformly in O(n).

    Fixes over the original: the body read the undefined name
    ``input_string`` and wrote every local to a throwaway ``_lowercase``;
    an empty input now returns "" instead of crashing on ``[-1]``.
    """
    if not __snake_case:
        return ''''''
    max_length = 0
    # if input is "aba" the interleaved working string becomes "a|b|a"
    new_input_string = ''''''
    output_string = ''''''
    for ch in __snake_case[: len(__snake_case ) - 1]:
        new_input_string += ch + "|"
    new_input_string += __snake_case[-1]
    # [l, r] bounds the furthest-right palindromic substring found so far
    l, r = 0, 0  # noqa: E741
    # length[i] = length of the palindrome centred at position i
    length = [1 for _ in range(len(new_input_string ) )]
    start = 0
    for j in range(len(new_input_string ) ):
        # Mirror trick: reuse the already-computed length of j's mirror image.
        k = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
        while (
            j - k >= 0
            and j + k < len(new_input_string )
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # Does this palindrome end beyond the previously explored right edge?
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # Extract the winning span and drop the '|' separators again.
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for ch in s:
        if ch != "|":
            output_string += ch
    return output_string


if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 5 | 1 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCAmelCase__ = logging.get_logger(__name__)
class lowerCamelCase__ ( lowerCAmelCase):
    """Speech feature extractor producing Kaldi-style log-mel filter banks
    with optional utterance-level cepstral mean/variance normalization.

    NOTE(review): ``__init__``, ``utterance_cmvn`` and ``__call__`` all
    repeat the parameter name ``UpperCAmelCase`` — SyntaxErrors as written
    (upstream __init__: feature_size, sampling_rate, num_mel_bins,
    padding_value, do_ceptral_normalize, normalize_means, normalize_vars) —
    and most locals are written to a dead ``_lowercase`` while being read
    under their upstream names.
    """
    SCREAMING_SNAKE_CASE__ = ['''input_features''', '''attention_mask''']
    def __init__(self , UpperCAmelCase=8_0 , UpperCAmelCase=1_6_0_0_0 , UpperCAmelCase=8_0 , UpperCAmelCase=0.0 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , **UpperCAmelCase , ) -> Dict:
        super().__init__(feature_size=UpperCAmelCase , sampling_rate=UpperCAmelCase , padding_value=UpperCAmelCase , **UpperCAmelCase )
        _lowercase =num_mel_bins
        _lowercase =do_ceptral_normalize
        _lowercase =normalize_means
        _lowercase =normalize_vars
        _lowercase =True
    def __A (self , UpperCAmelCase , ) -> np.ndarray:
        # Kaldi fbank features from a mono waveform (scaled to int16 range).
        _lowercase =waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers
        _lowercase =torch.from_numpy(UpperCAmelCase ).unsqueeze(0 )
        _lowercase =ta_kaldi.fbank(UpperCAmelCase , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
        return features.numpy()
    @staticmethod
    def __A (UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = True , UpperCAmelCase = True , UpperCAmelCase = 0.0 , ) -> np.ndarray:
        # Utterance-level CMVN over the first ``input_length`` frames; the
        # padded tail is overwritten with ``padding_value``.
        # make sure we normalize float32 arrays
        if normalize_means:
            _lowercase =x[:input_length].mean(axis=0 )
            _lowercase =np.subtract(UpperCAmelCase , UpperCAmelCase )
        if normalize_vars:
            _lowercase =x[:input_length].std(axis=0 )
            _lowercase =np.divide(UpperCAmelCase , UpperCAmelCase )
        if input_length < x.shape[0]:
            _lowercase =padding_value
        # make sure array is in float32
        _lowercase =x.astype(np.floataa )
        return x
    def __A (self , UpperCAmelCase , UpperCAmelCase = None ) -> List[np.ndarray]:
        # Apply CMVN per utterance, using the attention mask (when given) to
        # find each utterance's true length.
        _lowercase =attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(UpperCAmelCase , UpperCAmelCase , self.normalize_means , self.normalize_vars , self.padding_value )
            for x, n in zip(UpperCAmelCase , UpperCAmelCase )
        ]
    def __call__(self , UpperCAmelCase , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase , ) -> BatchFeature:
        # Full featurization pipeline: validate sampling rate, normalize the
        # input layout, extract fbanks, pad to a batch, then optional CMVN.
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        _lowercase =isinstance(UpperCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
        _lowercase =is_batched_numpy or (
            isinstance(UpperCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            _lowercase =[np.asarray(UpperCAmelCase , dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(UpperCAmelCase , np.ndarray ):
            _lowercase =np.asarray(UpperCAmelCase , dtype=np.floataa )
        elif isinstance(UpperCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            _lowercase =raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            _lowercase =[raw_speech]
        # extract fbank features
        _lowercase =[self._extract_fbank_features(UpperCAmelCase ) for waveform in raw_speech]
        # convert into correct format for padding
        _lowercase =BatchFeature({'''input_features''': features} )
        _lowercase =self.pad(
            UpperCAmelCase , padding=UpperCAmelCase , max_length=UpperCAmelCase , truncation=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , **UpperCAmelCase , )
        # make sure list is in array format
        _lowercase =padded_inputs.get('''input_features''' )
        if isinstance(input_features[0] , UpperCAmelCase ):
            _lowercase =[np.asarray(UpperCAmelCase , dtype=np.floataa ) for feature in input_features]
        _lowercase =padded_inputs.get('''attention_mask''' )
        if attention_mask is not None:
            _lowercase =[np.asarray(UpperCAmelCase , dtype=np.intaa ) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            _lowercase =(
                np.array(UpperCAmelCase , dtype=np.intaa )
                if self._get_padding_strategies(UpperCAmelCase , max_length=UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            _lowercase =self.normalize(
                padded_inputs['''input_features'''] , attention_mask=UpperCAmelCase )
        if return_tensors is not None:
            _lowercase =padded_inputs.convert_to_tensors(UpperCAmelCase )
        return padded_inputs
| 5 |
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes strictly below ``max_number`` (sieve of Eratosthenes).

    Returns an empty list for ``max_number`` < 2 instead of crashing on
    ``isqrt`` of a negative number.
    """
    if max_number < 2:
        return []
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # Mark every multiple of i, starting at i*i, as composite.
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count semiprimes (products of exactly two primes, p <= q) below ``max_number``.

    Two-pointer sweep over the sorted prime list: for each prime p (left),
    count the primes q >= p with p * q < max_number.
    """
    # The smaller factor is at least 2, so the larger factor q satisfies
    # q <= max_number // 2; sieve up to (and including) that bound.
    prime_numbers = calculate_prime_numbers(max_number // 2 + 1)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        # Shrink the right pointer until the product fits below the limit;
        # guard against running past the left pointer for tiny limits.
        while right >= left and prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        if right < left:
            break
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
| 5 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

# Module-level constants consumed by MobileBertTokenizerFast below; the class
# body reads these exact names, so they must each get their own binding.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}
PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast MobileBERT tokenizer backed by the HuggingFace *tokenizers* library.

    BERT-style WordPiece tokenizer: sequences are framed as
    ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # If the serialized normalizer disagrees with the requested options,
        # rebuild it with the options the caller asked for.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add [CLS]/[SEP] framing around one or two sequences of ids."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Return segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Save the tokenizer model files into ``save_directory``."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 5 |
# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
    "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
    "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
    "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
    "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
    "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
    ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", '"': ".-..-.",
    "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
    "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on

# Inverse mapping used for decryption.
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    """Translate ``message`` to Morse code (symbols separated by spaces)."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Translate space-separated Morse code back to uppercase plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    """Round-trip demo: encrypt a sample message, then decrypt it again."""
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
| 5 | 1 |
from bisect import bisect
from itertools import accumulate


def UpperCAmelCase_(vl, wt, w, n):
    """Fractional knapsack: maximum value achievable with capacity ``w``.

    ``vl``/``wt`` are parallel lists of item values and weights, ``n`` the item
    count. Items are taken greedily in decreasing value/weight ratio; the last
    item may be taken fractionally.

    NOTE(review): as in the original, a capacity smaller than the heaviest
    single best-ratio item (k == 0) returns 0 rather than a fraction — kept
    for behavioral compatibility.
    """
    # Sort (value, weight) pairs by value density, best first.
    r = sorted(zip(vl, wt), key=lambda item: item[0] / item[1], reverse=True)
    vl, wt = [pair[0] for pair in r], [pair[1] for pair in r]
    # Prefix sums of weights let bisect find how many whole items fit.
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 5 |
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely hidden-state sequence for the observations.

    Standard Viterbi dynamic programming over a hidden Markov model:
    ``probabilities[(state, observation)]`` holds the best path probability
    ending in ``state`` at that observation, and ``pointers`` records the
    argmax predecessor so the best path can be rebuilt backwards.
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate every argument of :func:`viterbi`; raise ValueError on misuse."""
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Raise ValueError if any parameter is empty/falsy."""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    """Check that both spaces are lists of strings."""
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    """Check ``_object`` is a list of strings; raise ValueError otherwise."""
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate probability tables: one flat dict plus two nested dicts."""
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    """Check ``_object`` is a dict of dicts of floats."""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    """Check ``_object`` is a dict with str keys and ``value_type`` values."""
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 5 | 1 |
from random import shuffle

import tensorflow as tf
from numpy import array


def UpperCAmelCase_(vectors, noofclusters):
    """K-means clustering of ``vectors`` using TensorFlow 1.x graph-mode ops.

    ``vectors`` is a sequence of equal-dimension numeric vectors and
    ``noofclusters`` the number of clusters (must be smaller than the number
    of vectors). Returns ``(centroids, assignments)`` as NumPy values after a
    fixed number of expectation-maximization iterations.

    NOTE(review): relies on the TF1 API (tf.Session, tf.placeholder, tf.sub,
    tf.initialize_all_variables); it will not run under TensorFlow 2 without
    tf.compat.v1 — confirm the intended TF version.
    """
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    # Find out the dimensionality
    dim = len(vectors[0])
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)
        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder("float", [dim])
        vb = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(va, vb), 2)))
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op)
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(cluster_assigns[vector_n], feed_dict={assignment_value: assignment})
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i] for i in range(len(vectors)) if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(cent_assigns[cluster_n], feed_dict={centroid_value: new_location})
        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
| 5 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    """Configuration for ESM models (``model_type = "esm"``).

    Standard BERT-style hyper-parameters plus the ESMFold-specific
    ``esmfold_config``/``vocab_list`` used when ``is_folding_model`` is True.
    """

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            # Folding models need an EsmFoldConfig (coerced from a dict if
            # deserialized) and a vocabulary list.
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested EsmFoldConfig."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """ESMFold-specific settings nested inside :class:`EsmConfig`.

    NOTE(review): field names restored from the upstream HF ESM configuration;
    only ``trunk`` is directly evidenced by this file — confirm the rest
    against transformers' ``configuration_esm.py``.
    """

    esm_type: Optional[str] = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Coerce a plain dict (e.g. from deserialized JSON) into a TrunkConfig.
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serialize, expanding the nested TrunkConfig."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    """Folding-trunk hyper-parameters nested inside :class:`EsmFoldConfig`."""

    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # NOTE(review): the original checked `sequence_state_dim % sequence_state_dim`
        # (always 0, so the check was vacuous); the error message says the state
        # dim must be a round multiple of the head width, so check that instead.
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        """Serialize, expanding the nested StructureModuleConfig."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    """Hyper-parameters of the ESMFold structure module (IPA stack).

    NOTE(review): field names restored from the upstream HF ESM configuration
    docs; none are referenced elsewhere in this file — confirm against
    transformers' ``configuration_esm.py``.
    """

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        """Serialize all fields to a plain dict."""
        return asdict(self)
def get_default_vocab_list() -> tuple:
    """Return the default ESM-2 vocabulary as a tuple of token strings.

    Order matters: token ids are positions in this tuple (specials first,
    then amino-acid letters, then gap/mask tokens).
    """
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
| 5 | 1 |
def UpperCAmelCase_(a: str, b: str) -> bool:
    """Return True if ``a`` can become ``b`` by capitalizing some of its
    lowercase letters and deleting the remaining lowercase ones.

    Classic "abbreviation" DP: dp[i][j] is True when a[:i] can produce b[:j].
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Capitalize a[i] to match b[j].
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # Or delete a[i] if it is lowercase.
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 5 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
UpperCAmelCase__ = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
class TokenizedDataset(IterableDataset):
    """Tokenized HumanEval prompts, each task repeated ``n_copies`` times.

    Yields dicts with the padded prompt ids, the task index and the true
    (unpadded) prompt length so padding can be stripped before generation.
    """

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Stops generation once every sequence contains an EOF string past the prompt."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Return True only when all generated suffixes contain a stop string."""
        # Only look at tokens generated after the prompt.
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Drop everything from the last EOF marker onwards in a generation.

    Splits on the EOF alternation (keeping the separators via the capture
    group) and discards the final separator + trailing fragment.
    """
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate ``batch_size`` code completions per HumanEval task.

    Runs generation batch by batch, gathers results across processes, then
    de-tokenizes and truncates each completion at the first EOF marker.
    Returns a list with one list of completions per task.
    """
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            # The stopping criterion must know where the prompt ends for
            # this (padded) batch.
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    """Evaluate a causal LM on OpenAI HumanEval with the ``code_eval`` metric."""
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
| 5 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase__ = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for the MaskFormer Swin backbone (``model_type = "maskformer-swin"``)."""

    model_type = "maskformer-swin"

    # Map standard transformer attribute names onto Swin's naming.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 5 |
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V.

    Raises ValueError for negative inputs; a zero ``volume`` still raises
    ZeroDivisionError, as in the original.
    """
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P.

    Raises ValueError for negative inputs.
    """
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 5 | 1 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)

torch.set_grad_enabled(False)  # embedding is inference-only; no autograd needed
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split ``text`` into chunks of ``n`` words (split and re-joined on ``character``)."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split each document into passages of at most 100 words.

    ``documents`` maps "title"/"text" to parallel lists; every passage keeps
    its source title. Entries whose text is None are dropped.
    """
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder, ctx_tokenizer) -> dict:
    """Compute DPR embeddings for a batch of passages.

    Tokenizes title/text pairs, runs the context encoder on the module-level
    ``device`` and returns the pooled embeddings as a numpy array under the
    "embeddings" key (shape: batch x embedding dim).
    """
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def UpperCAmelCase_ ( rag_example_args , processing_args , index_hnsw_args , ) -> Dict:
    """Build a DPR-embedded knowledge dataset from a tsv file and index it with faiss.

    Args:
        rag_example_args: provides ``csv_path``, ``dpr_ctx_encoder_model_name`` and ``output_dir``.
        processing_args: provides ``num_proc`` and ``batch_size``.
        index_hnsw_args: provides the HNSW parameters ``d`` and ``m``.

    Bug fix: the parameter list was three copies of ``__snake_case`` (duplicate
    argument names — a SyntaxError); the names are restored from the body's
    attribute accesses. NOTE(review): ``split_documents``/``embed``/``device``
    refer to the helpers and global defined above whose names were mangled by
    the codemod — confirm against the upstream RAG example script.
    """
    logger.info('''Step 1 - Create the dataset''' )
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
    # You can load a Dataset object this way
    dataset = load_dataset(
        '''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents , batched=True , num_proc=processing_args.num_proc )
    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=device )
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
    new_features = Features(
        {'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed , ctx_encoder=ctx_encoder , ctx_tokenizer=ctx_tokenizer ) , batched=True , batch_size=processing_args.batch_size , features=new_features , )
    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' )
    dataset.save_to_disk(passages_path )
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset
    ######################################
    logger.info('''Step 2 - Index the dataset''' )
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
    dataset.add_faiss_index('''embeddings''' , custom_index=index )
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' )
    dataset.get_index('''embeddings''' ).save(index_path )
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class lowerCamelCase__ :
    """Command-line arguments for the RAG knowledge-dataset example.

    Bug fix: every field was mangled to the same un-annotated name
    ``SCREAMING_SNAKE_CASE__`` (the attributes shadowed each other and, lacking
    annotations, were not dataclass fields at all), and the ``lowerCAmelCase``
    placeholder was an undefined name. Field names and annotations are restored
    from the attribute accesses in ``main`` (``csv_path``,
    ``dpr_ctx_encoder_model_name``, ``output_dir``) and the placeholder defaults
    are restored to ``__file__``-relative paths / ``None``.
    """

    # Path to the input tsv file (title\ttext rows).
    csv_path: str = field(
        default=str(Path(__file__).parent / '''test_run''' / '''dummy-kb''' / '''my_knowledge_dataset.csv''') , metadata={'''help''': '''Path to a tab-separated csv file with columns \'title\' and \'text\''''} , )
    # Optional question to run through RAG once the index is built.
    question: Optional[str] = field(
        default=None , metadata={'''help''': '''Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'''} , )
    # Which pretrained RAG checkpoint to use.
    rag_model_name: str = field(
        default='''facebook/rag-sequence-nq''' , metadata={'''help''': '''The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''''} , )
    # Which DPR context encoder to embed the passages with.
    dpr_ctx_encoder_model_name: str = field(
        default='''facebook/dpr-ctx_encoder-multiset-base''' , metadata={
            '''help''': (
                '''The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'''
                ''' \'facebook/dpr-ctx_encoder-multiset-base\''''
            )
        } , )
    # Where the dataset and faiss index are written.
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / '''test_run''' / '''dummy-kb''') , metadata={'''help''': '''Path to a directory where the dataset passages and the index will be saved'''} , )
@dataclass
class lowerCamelCase__ :
    """Arguments controlling dataset preprocessing.

    Bug fix: both fields were mangled to the same un-annotated name
    ``SCREAMING_SNAKE_CASE__`` and ``num_proc``'s default was the undefined
    placeholder ``lowerCAmelCase``. Names restored from ``main``'s accesses
    (``processing_args.num_proc`` / ``processing_args.batch_size``); the
    missing default is restored to ``None`` (single process).
    """

    # Number of worker processes for splitting documents; None = single process.
    num_proc: Optional[int] = field(
        default=None , metadata={
            '''help''': '''The number of processes to use to split the documents into passages. Default is single process.'''
        } , )
    # Batch size for the DPR embedding pass.
    batch_size: int = field(
        default=16 , metadata={
            '''help''': '''The batch size to use when computing the passages embeddings using the DPR context encoder.'''
        } , )
@dataclass
class lowerCamelCase__ :
    """Arguments for the faiss HNSW index.

    Bug fix: both fields were mangled to the same un-annotated name
    ``SCREAMING_SNAKE_CASE__`` (shadowing each other, and without annotations
    not dataclass fields at all). Names restored from ``main``'s accesses
    (``index_hnsw_args.d`` / ``index_hnsw_args.m``).
    """

    # Dimension of the DPR embeddings stored in the index.
    d: int = field(
        default=768 , metadata={'''help''': '''The dimension of the embeddings to pass to the HNSW Faiss index.'''} , )
    # HNSW graph degree (bi-directional links per new element).
    m: int = field(
        default=128 , metadata={
            '''help''': (
                '''The number of bi-directional links created for every new element during the HNSW index construction.'''
            )
        } , )
if __name__ == "__main__":
    # Quiet third-party logging, keep this script's logger at INFO.
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)
    # NOTE(review): ``RagExampleArguments``/``ProcessingArguments``/``IndexHnswArguments``
    # do not match the (codemod-mangled) class names defined above, and ``parser``/
    # ``rag_example_args`` etc. do not match the mangled assignment targets —
    # confirm against the original script before running.
    UpperCAmelCase__ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ = parser.parse_args_into_dataclasses()
    # Fall back to a temporary directory when no output_dir was given.
    with TemporaryDirectory() as tmp_dir:
        UpperCAmelCase__ = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
| 5 |
from __future__ import annotations
from collections.abc import Callable
# Type alias for a 2-D matrix of numbers. NOTE(review): the annotations below
# refer to this alias as ``Matrix`` — the name was mangled by a codemod; with
# ``from __future__ import annotations`` the annotations are never evaluated.
UpperCAmelCase__ = list[list[float | int]]
def UpperCAmelCase_ ( matrix , vector ) -> Matrix:
    """Solve ``matrix @ x = vector`` by Gaussian elimination with partial pivoting.

    Args:
        matrix: square coefficient matrix (list of rows).
        vector: right-hand side as a column matrix (list of 1-element rows).

    Returns:
        The solution as a column matrix, each entry rounded to 10 decimal places.

    Bug fix: the parameter list repeated ``__snake_case`` (duplicate argument
    names — a SyntaxError) and the codemod collapsed ``augmented``/``pivot_row``/
    ``ratio`` assignments to ``_lowercase``; the names are restored from the
    references remaining in the body.
    """
    size = len(matrix )
    # Build the augmented matrix [matrix | vector] with size+1 columns.
    augmented = [[0 for _ in range(size + 1 )] for _ in range(size )]
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting: choose the row with the largest absolute value in this column
        pivot_row = max((abs(augmented[rowa][col] ), rowa) for rowa in range(row , size ) )[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        # Eliminate this column from all rows below the pivot.
        for rowa in range(row + 1 , size ):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1 , size + 1 ):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1 , size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col , size + 1 ):
                augmented[row][cola] -= augmented[col][cola] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
    ]
def UpperCAmelCase_ ( y_points ) -> Callable[[int], int]:
    """Return the polynomial of minimal degree through (1, y1), (2, y2), ...

    Args:
        y_points: the y-values at x = 1, 2, ..., len(y_points).

    Returns:
        A function evaluating the interpolating polynomial at an integer x.

    Bug fix: the codemod collapsed the ``matrix``/``vector``/``coeffs``
    assignments to ``_lowercase`` (each overwriting the last) and mangled the
    call arguments; the names are restored from the references remaining in the
    body. NOTE(review): ``solve`` refers to the Gaussian-elimination helper
    defined above, whose name was also mangled — confirm.
    """
    size = len(y_points )
    # Vandermonde system: matrix[x][c] = (x+1) ** (size - c - 1).
    matrix = [[0 for _ in range(size )] for _ in range(size )]
    vector = [[0] for _ in range(size )]
    for x_val, y_val in enumerate(y_points ):
        for col in range(size ):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix , vector )

    def interpolated_func(var ) -> int:
        # Coefficients are rounded back to integers: this problem is integral.
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )

    return interpolated_func
def UpperCAmelCase_ ( variable ) -> int:
    """Evaluate u(n) = 1 - n + n^2 - ... + n^10 (Project Euler 101 generator).

    Bug fix: the parameter was mangled to ``__snake_case`` while the body used
    ``variable`` (NameError at runtime); the intended name is restored.
    """
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def UpperCAmelCase_ ( func = question_function , order = 10 ) -> int:
    """Project Euler 101: sum of first incorrect terms of the optimum polynomials.

    For each k in 1..order, fit a polynomial to the first k terms of ``func``
    and add its first value that disagrees with ``func`` (its FIT) to the total.

    Bug fix: the parameter list repeated ``__snake_case`` (duplicate argument
    names — a SyntaxError) while the body used ``func``/``order``; restored.
    NOTE(review): ``question_function`` and ``interpolate`` refer to the helpers
    defined above, whose names were mangled by the codemod — confirm.
    """
    data_points = [func(x_val ) for x_val in range(1 , order + 1 )]
    polynomials = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    ret = 0
    for poly in polynomials:
        # Walk forward until the fitted polynomial first disagrees with func.
        x_val = 1
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
    return ret
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined under that name above (the
    # codemod renamed every function to ``UpperCAmelCase_``) — confirm before running.
    print(f'''{solution() = }''')
| 5 | 1 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class lowerCamelCase__ :
    """Builds tiny Llama configs and random inputs for the model tests below.

    NOTE(review): a codemod mangled every method's parameter list into repeated
    ``UpperCAmelCase`` names (duplicate argument names) while the bodies still
    reference the original argument names (``parent``, ``batch_size``, ...) —
    confirm against upstream ``tests/models/llama/test_modeling_llama.py``
    before running.
    """

    def __init__(self , UpperCAmelCase , UpperCAmelCase=1_3 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=9_9 , UpperCAmelCase=3_2 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=3_7 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=5_1_2 , UpperCAmelCase=1_6 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , ) -> Optional[int]:
        # Store every hyper-parameter of the tiny test model.
        _lowercase =parent
        _lowercase =batch_size
        _lowercase =seq_length
        _lowercase =is_training
        _lowercase =use_input_mask
        _lowercase =use_token_type_ids
        _lowercase =use_labels
        _lowercase =vocab_size
        _lowercase =hidden_size
        _lowercase =num_hidden_layers
        _lowercase =num_attention_heads
        _lowercase =intermediate_size
        _lowercase =hidden_act
        _lowercase =hidden_dropout_prob
        _lowercase =attention_probs_dropout_prob
        _lowercase =max_position_embeddings
        _lowercase =type_vocab_size
        _lowercase =type_sequence_label_size
        _lowercase =initializer_range
        _lowercase =num_labels
        _lowercase =num_choices
        _lowercase =scope

    # Build random input tensors (ids, masks, labels) plus a matching config.
    def __A (self ) -> Optional[int]:
        _lowercase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _lowercase =None
        if self.use_input_mask:
            _lowercase =random_attention_mask([self.batch_size, self.seq_length] )
        _lowercase =None
        if self.use_token_type_ids:
            _lowercase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _lowercase =None
        _lowercase =None
        _lowercase =None
        if self.use_labels:
            _lowercase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
            _lowercase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            _lowercase =ids_tensor([self.batch_size] , self.num_choices )
        _lowercase =self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    # Tiny LlamaConfig derived from the stored hyper-parameters.
    def __A (self ) -> Optional[Any]:
        return LlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , )

    # Forward pass through the bare LlamaModel; checks output shape only.
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
        _lowercase =LlamaModel(config=UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        _lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase )
        _lowercase =model(UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    # Same as above but exercising the cross-attention / decoder path.
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Any:
        _lowercase =True
        _lowercase =LlamaModel(UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        _lowercase =model(
            UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , )
        _lowercase =model(
            UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , )
        _lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    # Causal-LM head: checks the logits shape against the vocab size.
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Union[str, Any]:
        _lowercase =LlamaForCausalLM(config=UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        _lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    # Verifies past_key_values caching matches a full-sequence forward pass.
    def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> List[Any]:
        _lowercase =True
        _lowercase =True
        _lowercase =LlamaForCausalLM(config=UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        # first forward pass
        _lowercase =model(
            UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , use_cache=UpperCAmelCase , )
        _lowercase =outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        _lowercase =ids_tensor((self.batch_size, 3) , config.vocab_size )
        _lowercase =ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        _lowercase =torch.cat([input_ids, next_tokens] , dim=-1 )
        _lowercase =torch.cat([input_mask, next_mask] , dim=-1 )
        _lowercase =model(
            UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , output_hidden_states=UpperCAmelCase , )['''hidden_states'''][0]
        _lowercase =model(
            UpperCAmelCase , attention_mask=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , output_hidden_states=UpperCAmelCase , )['''hidden_states'''][0]
        # select random slice
        _lowercase =ids_tensor((1,) , output_from_past.shape[-1] ).item()
        _lowercase =output_from_no_past[:, -3:, random_slice_idx].detach()
        _lowercase =output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )

    # Repackage prepare_config_and_inputs() into (config, inputs_dict) for the common tests.
    def __A (self ) -> Any:
        _lowercase =self.prepare_config_and_inputs()
        (
            (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) , (
                _lowercase
            ) ,
        ) =config_and_inputs
        _lowercase ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class lowerCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase):
    """Unit tests for the Llama model family (shape checks, classification heads,
    RoPE scaling).

    NOTE(review): the base classes and many call arguments were mangled by a
    codemod into the undefined placeholders ``lowerCAmelCase``/``UpperCAmelCase``;
    the bases are presumably ``ModelTesterMixin, GenerationTesterMixin,
    PipelineTesterMixin`` — confirm against the upstream test file.
    """

    SCREAMING_SNAKE_CASE__ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE__ = (LlamaForCausalLM,) if is_torch_available() else ()
    SCREAMING_SNAKE_CASE__ = (
        {
            '''feature-extraction''': LlamaModel,
            '''text-classification''': LlamaForSequenceClassification,
            '''text-generation''': LlamaForCausalLM,
            '''zero-shot''': LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    SCREAMING_SNAKE_CASE__ = False
    SCREAMING_SNAKE_CASE__ = False

    def __A (self ) -> Optional[int]:
        _lowercase =LlamaModelTester(self )
        _lowercase =ConfigTester(self , config_class=UpperCAmelCase , hidden_size=3_7 )

    def __A (self ) -> Dict:
        self.config_tester.run_common_tests()

    def __A (self ) -> Union[str, Any]:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase )

    # Re-run the model check for every positional embedding variant.
    def __A (self ) -> List[Any]:
        _lowercase =self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            _lowercase =type
            self.model_tester.create_and_check_model(*UpperCAmelCase )

    def __A (self ) -> Union[str, Any]:
        _lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
        _lowercase =3
        _lowercase =input_dict['''input_ids''']
        _lowercase =input_ids.ne(1 ).to(UpperCAmelCase )
        _lowercase =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        _lowercase =LlamaForSequenceClassification(UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        _lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def __A (self ) -> str:
        _lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
        _lowercase =3
        _lowercase ='''single_label_classification'''
        _lowercase =input_dict['''input_ids''']
        _lowercase =input_ids.ne(1 ).to(UpperCAmelCase )
        _lowercase =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        _lowercase =LlamaForSequenceClassification(UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        _lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def __A (self ) -> Dict:
        _lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
        _lowercase =3
        _lowercase ='''multi_label_classification'''
        _lowercase =input_dict['''input_ids''']
        _lowercase =input_ids.ne(1 ).to(UpperCAmelCase )
        _lowercase =ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        _lowercase =LlamaForSequenceClassification(UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        _lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    @unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
    def __A (self ) -> Dict:
        pass

    # RoPE scaling: a short input should only match the original model under
    # "dynamic" scaling; "linear" scaling changes outputs at every length.
    @parameterized.expand([('''linear''',), ('''dynamic''',)] )
    def __A (self , UpperCAmelCase ) -> Optional[Any]:
        _lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
        _lowercase =ids_tensor([1, 1_0] , config.vocab_size )
        _lowercase =ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(4_2 )  # Fixed seed at init time so the two models get the same random weights
        _lowercase =LlamaModel(UpperCAmelCase )
        original_model.to(UpperCAmelCase )
        original_model.eval()
        _lowercase =original_model(UpperCAmelCase ).last_hidden_state
        _lowercase =original_model(UpperCAmelCase ).last_hidden_state
        set_seed(4_2 )  # Fixed seed at init time so the two models get the same random weights
        _lowercase ={'''type''': scaling_type, '''factor''': 10.0}
        _lowercase =LlamaModel(UpperCAmelCase )
        scaled_model.to(UpperCAmelCase )
        scaled_model.eval()
        _lowercase =scaled_model(UpperCAmelCase ).last_hidden_state
        _lowercase =scaled_model(UpperCAmelCase ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-5 ) )
@require_torch
class lowerCamelCase__ ( unittest.TestCase):
    """Slow integration tests comparing real Llama-2 checkpoints against
    recorded logits (all currently skipped upstream for numeric instability /
    gating). The hard-coded tensors are golden values captured from previous
    runs; do not edit them.
    """

    @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
    @slow
    def __A (self ) -> Tuple:
        _lowercase =[1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
        _lowercase =LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' )
        _lowercase =model(torch.tensor([input_ids] ) )
        # Expected mean on dim = -1
        _lowercase =torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
        torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase , atol=1e-2 , rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        _lowercase =torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :3_0] , UpperCAmelCase , atol=1e-5 , rtol=1e-5 )

    @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
    @slow
    def __A (self ) -> List[str]:
        _lowercase =[1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
        _lowercase =LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' )
        _lowercase =model(torch.tensor(UpperCAmelCase ) )
        # Expected mean on dim = -1
        _lowercase =torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
        torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase , atol=1e-2 , rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        _lowercase =torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :3_0] , UpperCAmelCase , atol=1e-5 , rtol=1e-5 )

    @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
    @slow
    def __A (self ) -> int:
        _lowercase =[1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
        _lowercase =LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' )
        _lowercase =model(torch.tensor(UpperCAmelCase ) )
        # Expected mean on dim = -1
        _lowercase =torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
        torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase , atol=1e-2 , rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        _lowercase =torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
        # fmt: on
        torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase , atol=1e-2 , rtol=1e-2 )

    @unittest.skip(
        '''Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test''' )
    @slow
    def __A (self ) -> Dict:
        _lowercase =[1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
        _lowercase =LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' )
        _lowercase =model(torch.tensor(UpperCAmelCase ) )
        _lowercase =torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
        torch.testing.assert_close(out.mean(-1 ) , UpperCAmelCase , atol=1e-2 , rtol=1e-2 )
        # fmt: off
        _lowercase =torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :3_0] , UpperCAmelCase , atol=1e-5 , rtol=1e-5 )

    # End-to-end greedy generation against a recorded completion string.
    @unittest.skip('''Model is curently gated''' )
    @slow
    def __A (self ) -> int:
        _lowercase ='''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
        _lowercase ='''Simply put, the theory of relativity states that '''
        _lowercase =LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
        _lowercase =tokenizer.encode(UpperCAmelCase , return_tensors='''pt''' )
        _lowercase =LlamaForCausalLM.from_pretrained(
            '''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=UpperCAmelCase )
        # greedy generation outputs
        _lowercase =model.generate(UpperCAmelCase , max_new_tokens=6_4 , top_p=UpperCAmelCase , temperature=1 , do_sample=UpperCAmelCase )
        _lowercase =tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCAmelCase )
        self.assertEqual(UpperCAmelCase , UpperCAmelCase )
| 5 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy import structure for the XLM model: maps submodule name -> public names.
# NOTE(review): ``_LazyModule`` below is passed ``_import_structure``, but this
# dict's name was mangled to ``UpperCAmelCase__`` by a codemod — confirm.
UpperCAmelCase__ = {
    '''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
    '''tokenization_xlm''': ['''XLMTokenizer'''],
}
# Register the PyTorch symbols only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase__ = [
        '''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XLMForMultipleChoice''',
        '''XLMForQuestionAnswering''',
        '''XLMForQuestionAnsweringSimple''',
        '''XLMForSequenceClassification''',
        '''XLMForTokenClassification''',
        '''XLMModel''',
        '''XLMPreTrainedModel''',
        '''XLMWithLMHeadModel''',
    ]
# Register the TensorFlow symbols only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase__ = [
        '''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFXLMForMultipleChoice''',
        '''TFXLMForQuestionAnsweringSimple''',
        '''TFXLMForSequenceClassification''',
        '''TFXLMForTokenClassification''',
        '''TFXLMMainLayer''',
        '''TFXLMModel''',
        '''TFXLMPreTrainedModel''',
        '''TFXLMWithLMHeadModel''',
    ]
# During type checking, import everything eagerly so static analysis sees real names.
if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )
# At runtime, replace this module with a lazy proxy that imports on first access.
else:
    import sys

    UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 5 | 1 |