import unittest

from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device

from .test_unet_blocks_common import UNetBlockTesterMixin
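# Each test class below pins one block class plus a block type ("down", "mid",
# "up") and checks a 9-value slice of the block's output against golden
# numbers. The shared machinery lives in UNetBlockTesterMixin; roughly (a
# hedged sketch of the mixin's contract, not its actual code), test_output
# builds the block from prepare_init_args_and_inputs_for_common(), feeds it
# self.dummy_input, and asserts that a fixed slice of the flattened output
# matches expected_slice within a small tolerance.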
class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)


class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)


class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)


class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
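# The tests below exercise StableDiffusionKDiffusionPipeline, which exposes
# k-diffusion samplers ("sample_euler", "sample_dpmpp_2m", ...) behind the
# usual diffusers pipeline API. A minimal usage sketch mirroring the tests
# (needs a CUDA GPU and the k-diffusion package installed):
# >>> pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
# >>> pipe.set_scheduler("sample_euler")
# >>> image = pipe("A painting of a squirrel eating a burger", num_inference_steps=20).images[0]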
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
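# A typical launch, assuming `accelerate config` has been run once (the script
# name below is illustrative; adapt it to wherever this file is saved):
#
#   accelerate launch tracking.py --with_tracking
#   accelerate launch tracking.py --with_tracking --mixed_precision fp16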
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset,
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels', which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, it should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
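    # With `log_with="all"` the Accelerator picks up whichever tracker
    # integrations are installed in the environment (e.g. TensorBoard, Weights
    # & Biases, Comet ML), and `init_trackers` starts a run named after this
    # file with the hyper-parameter config attached to it.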
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
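# Shape walk-through with illustrative numbers: for a checkpoint_version >= 2.0
# QKV weight with num_heads=16, hidden_size (per head)=64 and num_splits=3, the
# incoming tensor is [16*3*64, D] = [3072, D]; it is viewed as [16, 3, 64, D],
# transposed to [3, 16, 64, D], and flattened back to [3072, D], now in the
# [num_splits * num_heads * hidden_size, :] layout described above.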
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For the LM head, transformers wants the weight matrix tied to the word embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
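# For illustration: a Megatron key such as "layers.0.self_attention.dense.weight"
# is matched by layer_re above and stored as "transformer.h.0.attn.c_proj.weight"
# (transposed), while "layers.0.input_layernorm.weight" becomes
# "transformer.h.0.ln_1.weight".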
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)


####################################################################################################

if __name__ == "__main__":
    main()

####################################################################################################
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
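# LED shares its byte-level BPE tokenizer with BART, so most of the tests below
# mirror the standard BART tokenizer tests; the LED-specific behaviour is the
# `global_attention_mask` padding check further down.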
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])
    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)
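    # Note: LED pads `global_attention_mask` with -1 rather than 0, which is
    # why the shorter sequence above ends in [-1, -1] after `tokenizer.pad`;
    # presumably this keeps padded positions distinguishable from real
    # local-attention (0) positions.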
    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
from __future__ import annotations

from typing import Any


class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # it's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
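    # For reference, the Sherman-Morrison identity being applied (here `self`
    # plays the role of A^(-1), which is why the demo below calls it on ainv):
    #   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)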
# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
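# This file follows the standard transformers test layout: ViTModelTester
# builds tiny configs and random inputs, ViTModelTest wires them through the
# common ModelTesterMixin/PipelineTesterMixin machinery, and the integration
# tests at the bottom run released checkpoints end to end.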
class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
'''simple docstring'''
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Sum of array[start] .. array[end], both inclusive."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
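

# Usage sketch for the class above (the values are illustrative, not from the
# original source):
def _demo_prefix_sum() -> None:
    ps = PrefixSum([1, 2, 3, 4])
    assert ps.get_sum(1, 3) == 9  # 2 + 3 + 4
    assert ps.contains_sum(5)  # the subarray [2, 3] sums to 5
    assert not ps.contains_sum(100)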
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class TestLinearAlgebra(unittest.TestCase):
    def test_component(self):
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertAlmostEqual(7, a.component(2, 1), delta=0.01)

    def test_add_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5))
        )
if __name__ == "__main__":
unittest.main()
'''simple docstring'''
def get_demo_graph(index: int) -> dict:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict) -> list:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
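

# Usage sketch over the bundled demo graphs: in graph 0 the triangle 0-1-2 and
# the cycle 5-6-7-8 contain no bridges, so only (2, 3), (2, 5) and (3, 4) remain.
def _demo_bridges() -> None:
    bridges = compute_bridges(get_demo_graph(0))
    assert sorted(bridges) == [(2, 3), (2, 5), (3, 4)]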
"""simple docstring"""
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate an expression in reverse Polish (postfix) notation."""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division that truncates toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
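

# Quick usage sketch (the expression below is (5 - 2) * 4 in postfix):
def _demo_postfix() -> None:
    assert evaluate_postfix(["5", "2", "-", "4", "*"]) == 12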
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
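
# Example invocation (the script filename and checkpoint path below are
# placeholders, not from the original source):
#   python convert_glpn_to_pytorch.py \
#       --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti \
#       --model_name glpn-kitti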
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
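

# A minimal concrete-subclass sketch (the command name and behavior here are
# illustrative; in practice `parser` is the sub-parsers action returned by
# `ArgumentParser.add_subparsers()`):
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        hello_parser = parser.add_parser("hello")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello from the CLI")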
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
"""simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)
    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst
if __name__ == "__main__":
# read original image
__A : Optional[int] = imread(r"../image_data/lena.jpg")
# turn image in gray scale value
__A : Dict = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
__A : Dict = gaussian_filter(gray, 3, sigma=1)
__A : Tuple = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow("gaussian filter with 3x3 mask", gaussianaxa)
imshow("gaussian filter with 5x5 mask", gaussianaxa)
waitKey()
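

# Sanity-check sketch for the kernel generator (added for illustration): a
# Gaussian kernel is strictly positive and symmetric about its center.
def _check_kernel() -> None:
    kernel = gen_gaussian_kernel(3, sigma=1.0)
    assert kernel.shape == (3, 3)
    assert (kernel > 0).all()
    assert abs(kernel[0, 1] - kernel[1, 0]) < 1e-12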
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =["""RemBertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =["""RemBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"""REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RemBertForCausalLM""",
"""RemBertForMaskedLM""",
"""RemBertForMultipleChoice""",
"""RemBertForQuestionAnswering""",
"""RemBertForSequenceClassification""",
"""RemBertForTokenClassification""",
"""RemBertLayer""",
"""RemBertModel""",
"""RemBertPreTrainedModel""",
"""load_tf_weights_in_rembert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"""TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRemBertForCausalLM""",
"""TFRemBertForMaskedLM""",
"""TFRemBertForMultipleChoice""",
"""TFRemBertForQuestionAnswering""",
"""TFRemBertForSequenceClassification""",
"""TFRemBertForTokenClassification""",
"""TFRemBertLayer""",
"""TFRemBertModel""",
"""TFRemBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case =logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
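

# Usage sketch (hypothetical values; `output_dir` is the only required field
# inherited from TrainingArguments):
def _example_seq2seq_args() -> Seq2SeqTrainingArguments:
    return Seq2SeqTrainingArguments(
        output_dir="out",
        predict_with_generate=True,
        generation_max_length=128,
        generation_num_beams=4,
    )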
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, so raise an error in that case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Get the range of shard indices per job."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split gen_kwargs into at most max_num_jobs gen_kwargs, one per job."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    # Lists of the same size get the same shuffling, so entangled lists
    # (e.g. shards and their metadata) stay aligned.
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
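

# Worked example (illustrative): 10 shards distributed over 3 jobs gives
# groups of sizes 4, 3 and 3, and only lists count as shardable data sources.
def _demo_sharding() -> None:
    groups = _distribute_shards(num_shards=10, max_num_jobs=3)
    assert [len(g) for g in groups] == [4, 3, 3]
    gen_kwargs = {"files": [f"f{i}" for i in range(10)], "option": True}
    assert _number_of_shards_in_gen_kwargs(gen_kwargs) == 10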
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []
    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")
    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
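
# Example invocation (the script filename and checkpoint path below are
# placeholders; the flags match the argparse definition above):
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz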
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas=None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample
    def set_timesteps(self, num_inference_steps: int, device=None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ):
        # 1. get the "previous" step value (= t + 1 for the inverted process)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
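

# Minimal usage sketch (the tensor shapes are illustrative; a real pipeline
# would pass UNet noise predictions as `model_output`):
def _demo_inverse_step() -> None:
    scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(50)
    sample = torch.randn(1, 3, 8, 8)
    noise_pred = torch.randn(1, 3, 8, 8)
    out = scheduler.step(noise_pred, int(scheduler.timesteps[0]), sample)
    assert out.prev_sample.shape == sample.shape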
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__snake_case =logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self ):
__lowerCAmelCase = tempfile.mkdtemp()
# fmt: off
__lowerCAmelCase = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
__lowerCAmelCase = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
__lowerCAmelCase = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
__lowerCAmelCase = {"unk_token": "<unk>"}
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(UpperCAmelCase__ ) )
__lowerCAmelCase = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
__lowerCAmelCase = os.path.join(self.tmpdirname , UpperCAmelCase__ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
def snake_case ( self , **__a ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **UpperCAmelCase__ )
def snake_case ( self , **__a ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **UpperCAmelCase__ )
def snake_case ( self , **__a ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def snake_case ( self ):
shutil.rmtree(self.tmpdirname )
def snake_case ( self ):
__lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowerCAmelCase = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case ( self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = OwlViTProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
__lowerCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase__ )
__lowerCAmelCase = OwlViTProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
__lowerCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase__ )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase__ )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase__ )
def snake_case ( self ):
__lowerCAmelCase = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__lowerCAmelCase = self.get_image_processor(do_normalize=UpperCAmelCase__ )
__lowerCAmelCase = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCAmelCase__ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase__ )
    def test_image_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input , return_tensors="np" )
        input_processor = processor(images=image_input , return_tensors="np" )

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def test_tokenizer( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str , return_tensors="np" )
        encoded_tok = tokenizer(input_str , return_tensors="np" )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )

    def test_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str , images=image_input )

        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )

        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_processor_with_text_list( self ):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name )

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text )

        seq_length = 16
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
        self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )

        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()

    def test_processor_with_nested_text_list( self ):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name )

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts )

        seq_length = 16
        batch_size = len(input_texts )
        num_max_text_queries = max([len(texts ) for texts in input_texts] )

        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
        self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )

        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()

    def test_processor_case( self ):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name )

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts )

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
        self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
        self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
        self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
    def test_processor_case2( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input , query_images=query_input )

        self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )

        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()

    def test_tokenizer_decode( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )

        self.assertListEqual(decoded_tok , decoded_processor )
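# Illustrative usage sketch (not part of the test suite; model name and inputs are examples):
# processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
# inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=pil_image, return_tensors="pt")
# inputs["input_ids"] has one row per text query (padded to length 16) and
# inputs["pixel_values"] holds the preprocessed image batch.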
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/blenderbot_small-90M""": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__( self , vocab_file=None , merges_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
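# Illustrative usage sketch (checkpoint name taken from the pretrained map above):
# tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
# ids = tokenizer("sample text").input_ids  # byte-level BPE ids; bos/eos handled by the methods above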
def pancake_sort(arr: list ) -> list:
    """Sort a list with pancake sort (a sequence of prefix reversals)."""
    cur = len(arr )
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi so the maximum moves to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse the first cur elements so the maximum lands at index cur - 1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
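# Quick illustrative checks of the implementation above (cheap, run on import):
assert pancake_sort([3, 1, 2]) == [1, 2, 3]
assert pancake_sort([]) == []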
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig ):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__( self , vocab_size=10000 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=4 , decoder_layerdrop=0.0 , use_cache=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_target_positions=1024 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
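# Illustrative sketch: a deliberately tiny decoder config (all sizes hypothetical).
# `hidden_size` resolves to `d_model` through the attribute_map declared above.
_tiny_config = Speech2Text2Config(vocab_size=1000, decoder_layers=2, d_model=64)
assert _tiny_config.hidden_size == 64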
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder ):
    def adaptive_forward( self , hidden_states , current_layer , attention_mask=None , head_mask=None ):
        layer_outputs = self.layer[current_layer](hidden_states , attention_mask , head_mask[current_layer] )
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , BERT_START_DOCSTRING , )
class BertModelWithPabee(BertModel ):
    def __init__( self , config ):
        super().__init__(config )

        self.encoder = BertEncoderWithPabee(config )

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def set_regression_threshold( self , threshold ):
        self.regression_threshold = threshold

    def set_patience( self , patience ):
        self.patience = patience

    def reset_stats( self ):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats( self ):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , encoder_hidden_states=None , encoder_attention_mask=None , output_dropout=None , output_layers=None , regression=False , ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds" )

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape , device=device )
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape , dtype=torch.long , device=device )

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask , input_shape , device )

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape , device=device )
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask )
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask , self.config.num_hidden_layers )

        embedding_output = self.embeddings(
            input_ids=input_ids , position_ids=position_ids , token_type_ids=token_type_ids , inputs_embeds=inputs_embeds )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers ):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs , current_layer=i , attention_mask=extended_attention_mask , head_mask=head_mask )

                pooled_output = self.pooler(encoder_outputs )
                logits = output_layers[i](output_dropout(pooled_output ) )
                res.append(logits )
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output , attention_mask=extended_attention_mask , head_mask=head_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_extended_attention_mask , )
            pooled_output = self.pooler(encoder_outputs[0] )
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output )]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers ):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs , current_layer=i , attention_mask=extended_attention_mask , head_mask=head_mask )

                pooled_output = self.pooler(encoder_outputs )
                logits = output_layers[i](pooled_output )
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels ) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1 )
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1 )
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels ) ):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1
        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """ , BERT_START_DOCSTRING , )
class BertForSequenceClassificationWithPabee(BertPreTrainedModel ):
    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , labels=None , ):
        logits = self.bert(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits ):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
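# Illustrative inference sketch (hypothetical checkpoint and batch): patience-based early
# exit trades speed against accuracy; a larger patience exits later.
# model = BertForSequenceClassificationWithPabee.from_pretrained("bert-base-uncased")
# model.bert.set_patience(3)
# model.eval()
# logits = model(input_ids=batch_input_ids)[0]
# model.bert.log_stats()  # prints the average number of transformer layers actually run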
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list ):
    def __lt__( self , other ):
        return self[-1] < other[-1]

    def __eq__( self , other ):
        return self[-1] == other[-1]


def patience_sort(collection: list ) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element] )
        i = bisect_left(stacks , new_stack )
        if i != len(stacks ):
            stacks[i].append(element )
        else:
            stacks.append(new_stack )

    # use a heap-based merge to merge stacks efficiently
    collection[:] = merge(*(reversed(stack ) for stack in stacks) )
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted))
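# Quick illustrative check of the implementation above:
assert patience_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]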
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
    @require_multi_gpu
    def test_multi_gpu( self ):
        print(f"Found {torch.cuda.device_count()} devices." )
        cmd = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd, env=os.environ.copy() )
    @require_multi_gpu
    def test_multi_gpu_ops( self ):
        print(f"Found {torch.cuda.device_count()} devices." )
        cmd = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}" )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd, env=os.environ.copy() )
    @require_multi_gpu
    def test_pad_across_processes( self ):
        cmd = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd, env=os.environ.copy() )
    @require_multi_gpu
    def test_distributed_data_loop( self ):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only" )
        cmd = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices='0,1' ):
            execute_subprocess_async(cmd, env=os.environ.copy() )
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""

    tensora = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__snake_case ="""\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__snake_case ="""\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__snake_case ="""
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute( self , predictions , references , max_order=4 , smooth=False ):
        score = compute_bleu(
            reference_corpus=references , translation_corpus=predictions , max_order=max_order , smooth=smooth )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class DPRConfig(PretrainedConfig ):
    model_type = "dpr"

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , projection_dim = 0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
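# Illustrative sketch: a deliberately tiny DPR encoder config (all sizes hypothetical).
_tiny_dpr = DPRConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64)
assert _tiny_dpr.model_type == "dpr"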
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__snake_case ="""\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
__snake_case ="""\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
__snake_case ="""
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds , labels ):
    return float((preds == labels).mean() )


def acc_and_f1(preds , labels , f1_avg="binary" ):
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds , average=f1_avg ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds , labels ):
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred['prediction']
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels )
        f1 = f1_score(y_true=question_labels , y_pred=question_preds , average='macro' )
        f1s.append(f1 )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    f1_m = float(sum(f1s ) / len(f1s ) )
    em = sum(ems ) / len(ems )
    f1_a = float(f1_score(y_true=labels , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric ):
    def _info( self ):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
    def _get_feature_types( self ):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
    def _compute( self , predictions , references ):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "cb":
            return acc_and_f1(predictions , references , f1_avg='macro' )
        elif self.config_name == "record":
            dataset = [
                {
                    'qas': [
                        {'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
            return evaluate_record(dataset , predictions )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions , references )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
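# Illustrative note (not executed here): with the lazy module registered in sys.modules,
# config/processing symbols import cheaply, while the torch-backed model classes are only
# imported on first attribute access, e.g.:
# from transformers.models.git import GitConfig        # cheap: config only
# from transformers.models.git import GitForCausalLM   # pulls in torch lazily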
'''simple docstring'''
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=224 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict( self ):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp( self ):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )

    @property
    def image_processor_dict( self ):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties( self ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , 'image_mean' ) )
        self.assertTrue(hasattr(image_processor , 'image_std' ) )
        self.assertTrue(hasattr(image_processor , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processor , 'do_resize' ) )
        self.assertTrue(hasattr(image_processor , 'size' ) )

    def test_batch_feature( self ):
        pass
    def test_call_pil( self ):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )

        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ) , )

        # Test batched
        encoded_images = image_processor(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )

        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ) , )

        # Test batched
        encoded_images = image_processor(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )

        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ) , )

        # Test batched
        encoded_images = image_processor(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ) , )
'''simple docstring'''
import os
__snake_case ={"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1_000}
def parse_roman_numerals(numerals: str ) -> int:
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int ) -> str:
    numerals = ""
    m_count = num // 1000
numerals += m_count * "M"
num %= 1000
    c_count = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def a_ ( lowerCamelCase : str = "/p089_roman.txt" ):
lowerCAmelCase = 0
with open(os.path.dirname(lowerCamelCase ) + roman_numerals_filename ) as filea:
lowerCAmelCase = filea.readlines()
for line in lines:
lowerCAmelCase = line.strip()
lowerCAmelCase = parse_roman_numerals(lowerCamelCase )
lowerCAmelCase = generate_roman_numerals(lowerCamelCase )
savings += len(lowerCamelCase ) - len(lowerCamelCase )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
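# Quick illustrative round trip of the two helpers above:
assert parse_roman_numerals("XIV") == 14
assert generate_roman_numerals(14) == "XIV"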
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str , albert_config_file: str , pytorch_dump_path: str ):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    model = AlbertForPreTraining(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model , config , tf_checkpoint_path )

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
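# Illustrative invocation of this script (script name and all paths are hypothetical):
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin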
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """Creates a random float32 tensor as a nested list"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )

    return values
class ASTFeatureExtractionTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=1 , padding_value=0.0 , sampling_rate=16000 , return_attention_mask=True , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict( self ):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]

        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = ASTFeatureExtractor

    def setUp( self ):
        self.feat_extract_tester = ASTFeatureExtractionTester(self )

    def test_call( self ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1e-3 ) )

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs , padding=True , return_tensors='np' ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs , padding=True , return_tensors='np' ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feat_extract(speech_inputs , return_tensors='np' ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs , return_tensors='np' ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
    @require_torch
    def test_double_precision_pad( self ):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
    def _load_datasamples( self , num_samples ):
        from datasets import load_dataset

        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('id' ).select(range(num_samples ) )[:num_samples]['audio']

        return [x["array"] for x in speech_samples]
    @require_torch
    def test_integration( self ):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
        # fmt: on

        input_speech = self._load_datasamples(1 )
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech , return_tensors='pt' ).input_values
        self.assertEqual(input_values.shape , (1, 1024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , EXPECTED_INPUT_VALUES , atol=1e-4 ) )
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin ):
    # warning at import time
    warnings.warn(
        'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '
        'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.' , FutureWarning , )
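# Illustrative replacement for the deprecated import path named in the warning above:
# from transformers import TFGenerationMixin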
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase ):
    def test_accelerated_optimizer_pickling( self ):
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(f'Accelerated optimizer pickling failed with {e}' )
        AcceleratorState._reset_state()
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase ):
    def test_text_streamer_matches_non_streaming( self ):
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        model = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(torch_device )
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer )
            model.generate(input_ids , max_new_tokens=10 , do_sample=False , streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text , greedy_text )
    def test_iterator_streamer_matches_non_streaming( self ):
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
        model = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(torch_device )
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )

        streamer = TextIteratorStreamer(tokenizer )
        generation_kwargs = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
        thread = Thread(target=model.generate , kwargs=generation_kwargs )
        thread.start()
        streamer_text = """"""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text , greedy_text )
def __A ( self ) -> str:
'''simple docstring'''
lowerCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
lowerCamelCase = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(UpperCAmelCase__ )
lowerCamelCase = -1
lowerCamelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase__ )
lowerCamelCase = model.generate(UpperCAmelCase__ , max_new_tokens=10 , do_sample=UpperCAmelCase__ )
lowerCamelCase = greedy_ids[:, input_ids.shape[1] :]
lowerCamelCase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCamelCase = TextStreamer(UpperCAmelCase__ , skip_prompt=UpperCAmelCase__ )
model.generate(UpperCAmelCase__ , max_new_tokens=10 , do_sample=UpperCAmelCase__ , streamer=UpperCAmelCase__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCamelCase = cs.out[:-1]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = AutoTokenizer.from_pretrained("""distilgpt2""" )
lowerCamelCase = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(UpperCAmelCase__ )
lowerCamelCase = -1
lowerCamelCase = torch.ones((1, 5) , device=UpperCAmelCase__ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCamelCase = TextStreamer(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
model.generate(UpperCAmelCase__ , max_new_tokens=1 , do_sample=UpperCAmelCase__ , streamer=UpperCAmelCase__ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCamelCase = cs.out[:-1] # Remove the final "\n"
lowerCamelCase = tokenizer(UpperCAmelCase__ , return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __A ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
lowerCamelCase = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(UpperCAmelCase__ )
lowerCamelCase = -1
lowerCamelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase__ )
lowerCamelCase = TextIteratorStreamer(UpperCAmelCase__ , timeout=0.001 )
lowerCamelCase = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
lowerCamelCase = Thread(target=model.generate , kwargs=UpperCAmelCase__ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(UpperCAmelCase__ ):
lowerCamelCase = """"""
for new_text in streamer:
streamer_text += new_text
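# Consolidated usage sketch (illustrative, not one of the original tests): the
# pattern the tests above exercise is running generate() on a background thread
# while consuming decoded text from a TextIteratorStreamer as it is produced.
# The model/tokenizer name below is the same tiny test checkpoint used above.
#
#   tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
#   model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
#   inputs = tokenizer("Hello", return_tensors="pt")
#   streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#   Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer}).start()
#   for new_text in streamer:
#       print(new_text, end="", flush=True)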
| 252 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
| 4 | 0 |
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowerCamelCase__ ( unittest.TestCase ):
@parameterized.expand([(None,), ("foo.json",)] )
def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case : int = GenerationConfig(
do_sample=UpperCAmelCase__ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase__ , config_name=UpperCAmelCase__ )
snake_case : Any = GenerationConfig.from_pretrained(UpperCAmelCase__ , config_name=UpperCAmelCase__ )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , UpperCAmelCase__ )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , UpperCAmelCase__ )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Optional[int] = AutoConfig.from_pretrained("gpt2" )
snake_case : List[str] = GenerationConfig.from_model_config(UpperCAmelCase__ )
snake_case : Optional[Any] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Any = GenerationConfig()
snake_case : List[str] = {
"max_new_tokens": 1_024,
"foo": "bar",
}
snake_case : int = copy.deepcopy(UpperCAmelCase__ )
snake_case : str = generation_config.update(**UpperCAmelCase__ )
# update_kwargs was not modified (no side effects)
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(UpperCAmelCase__ , {"foo": "bar"} )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : List[str] = GenerationConfig()
snake_case : Union[str, Any] = "bar"
with tempfile.TemporaryDirectory("test-generation-config" ) as tmp_dir:
generation_config.save_pretrained(UpperCAmelCase__ )
snake_case : Optional[Any] = GenerationConfig.from_pretrained(UpperCAmelCase__ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , "bar" )
snake_case : Optional[Any] = GenerationConfig.from_model_config(UpperCAmelCase__ )
assert not hasattr(UpperCAmelCase__ , "foo" ) # no new kwargs should be initialized if from config
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Optional[Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , UpperCAmelCase__ )
self.assertEqual(default_config.num_beams , 1 )
snake_case : Optional[Any] = GenerationConfig(
do_sample=UpperCAmelCase__ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , UpperCAmelCase__ )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase__ )
snake_case : Dict = GenerationConfig.from_pretrained(UpperCAmelCase__ , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , UpperCAmelCase__ )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class lowerCamelCase__ ( unittest.TestCase ):
@classmethod
def lowerCamelCase_ ( cls ):
"""simple docstring"""
snake_case : List[Any] = TOKEN
HfFolder.save_token(UpperCAmelCase__ )
@classmethod
def lowerCamelCase_ ( cls ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="test-generation-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org" )
except HTTPError:
pass
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : List[Any] = GenerationConfig(
do_sample=UpperCAmelCase__ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("test-generation-config" , use_auth_token=self._token )
snake_case : Optional[int] = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase__ , getattr(UpperCAmelCase__ , UpperCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-generation-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase__ , repo_id="test-generation-config" , push_to_hub=UpperCAmelCase__ , use_auth_token=self._token )
snake_case : str = GenerationConfig.from_pretrained(F'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase__ , getattr(UpperCAmelCase__ , UpperCAmelCase__ ) )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Union[str, Any] = GenerationConfig(
do_sample=UpperCAmelCase__ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token )
snake_case : Optional[Any] = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase__ , getattr(UpperCAmelCase__ , UpperCAmelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase__ , repo_id="valid_org/test-generation-config-org" , push_to_hub=UpperCAmelCase__ , use_auth_token=self._token )
snake_case : Dict = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase__ , getattr(UpperCAmelCase__ , UpperCAmelCase__ ) )
| 148 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to the burst times.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0

    # While processes remain uncompleted, a process whose arrival time has
    # passed and that still has remaining execution time is put into
    # ready_process; the shortest job in ready_process (target_process) runs.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
__snake_case =4
__snake_case =[2, 5, 3, 7]
__snake_case =[0, 0, 0, 0]
__snake_case =calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__snake_case =calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
F'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(F'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(F'''Average turnaround time = {mean(turn_around_time):.5f}''')
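    # A second, hypothetical test case (not in the original file) with staggered
    # arrival times, to exercise the arrival-time handling of the scheduler.
    print("\n[TEST CASE 02]")
    no_of_processes = 3
    burst_time = [3, 1, 2]
    arrival_time = [0, 1, 2]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(range(1, 4)):
        print(f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t{waiting_time[i]}\t\t\t\t{turn_around_time[i]}")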
| 4 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
def snake_case ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCAmelCase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"[UNK]",
]
__lowerCAmelCase = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
__lowerCAmelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
__lowerCAmelCase = {"unk_token": "[UNK]"}
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(UpperCAmelCase__ ) )
def snake_case ( self , **__a ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def snake_case ( self , __a ):
__lowerCAmelCase = "lower newer"
__lowerCAmelCase = "lower newer"
return input_text, output_text
def snake_case ( self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = "lower newer"
__lowerCAmelCase = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
__lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
__lowerCAmelCase = tokens + [tokenizer.unk_token]
__lowerCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )
def snake_case ( self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = tokenizer("Hello" , "World" )
__lowerCAmelCase = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["token_type_ids"] , UpperCAmelCase__ )
@slow
def snake_case ( self ):
__lowerCAmelCase = self.tokenizer_class.from_pretrained("microsoft/deberta-base" )
__lowerCAmelCase = tokenizer.encode("sequence builders" , add_special_tokens=UpperCAmelCase__ )
__lowerCAmelCase = tokenizer.encode("multi-sequence build" , add_special_tokens=UpperCAmelCase__ )
__lowerCAmelCase = tokenizer.encode(
"sequence builders" , add_special_tokens=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ )
__lowerCAmelCase = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def snake_case ( self ):
__lowerCAmelCase = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
__lowerCAmelCase = tokenizer_class.from_pretrained("microsoft/deberta-base" )
__lowerCAmelCase = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
__lowerCAmelCase = tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ )
__lowerCAmelCase = [tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ ) for seq in encoding["input_ids"]]
# fmt: off
__lowerCAmelCase = {
"input_ids": [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
__lowerCAmelCase = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
self.assertDictEqual(encoding.data , UpperCAmelCase__ )
for expected, decoded in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 57 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : Optional[int] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Tuple ) -> Any:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_euler' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self : List[str] ) -> Dict:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_euler' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_dpmpp_2m' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=1_5 , output_type='np' , use_karras_sigmas=UpperCAmelCase__ , )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array(
[0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 4 | 0 |
import os
def solution() -> str:
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
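# Tiny illustration (hypothetical data, not the real num.txt contents) of the
# same "sum everything, then keep the first ten digits" idea on an in-memory list:
#   str(sum(int(n) for n in ["9" * 20, "1"]))[:10]  ->  "1000000000"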
| 340 |
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
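# Quick self-check (not part of the original script; toy sizes are made up):
# for checkpoint_version >= 2.0 the fused QKV tensor is reinterpreted as
# [num_heads, num_splits, head_dim, ...] and permuted to put the splits first;
# the flattened shape is unchanged.
def _demo_fix_query_key_value_ordering():
    num_heads, num_splits, head_dim, cols = 2, 3, 4, 8
    param = torch.arange(num_heads * num_splits * head_dim * cols, dtype=torch.float32)
    param = param.view(num_heads * num_splits * head_dim, cols)
    out = fix_query_key_value_ordering(param, 2.0, num_splits, num_heads, head_dim)
    assert out.shape == param.shape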
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For the LM head, transformers wants the weight matrix tied to the word embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
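# Example invocation (illustrative paths only), following the PYTHONPATH hint
# from the header comments above:
#
#   PYTHONPATH=/tmp/Megatron-LM python convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure /path/to/megatron/checkpoint.zip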
| 4 | 0 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Return the Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by majority vote among its k nearest neighbours."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
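    # Hypothetical extension (not in the original file): score the classifier on
    # the held-out split created above, since X_test / y_test are otherwise unused.
    correct = sum(
        classifier(X_train, y_train, classes, point) == classes[expected]
        for point, expected in zip(X_test, y_test)
    )
    print(f"Accuracy on the test split: {correct / len(y_test):.2%}")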
| 88 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indices(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
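    def test_sherman_morrison_identity() -> None:
        # Hypothetical extra check (not in the original file): multiplying
        # (a + uv^T) by the Sherman-Morrison result should give the identity,
        # since sherman_morrison() returns (a + uv^T)^(-1) given self = a^(-1).
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1  # here a = a^(-1) = I
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        inv = ainv.sherman_morrison(u, v)
        print(f"(a + uv^T) * (a + uv^T)^(-1) is {(ainv + u * v.transpose()) * inv}")

    test_sherman_morrison_identity()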
| 4 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"
    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # The fused qkv matrix/vector is split row-wise into query, key and value.
            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
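# Minimal illustration (not from the original script) of the row-wise q/k/v
# split performed above: a fused [3 * dim, in_features] matrix yields three
# [dim, in_features] blocks.
def _demo_qkv_split():
    dim, in_features = 2, 4
    qkv = torch.arange(3 * dim * in_features).view(3 * dim, in_features)
    q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, in_features)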
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs).logits

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 201 |
'''simple docstring'''
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """
        Return the sum of array[start : end + 1].

        >>> PrefixSum([1, 2, 3]).get_sum(0, 2)
        6
        >>> PrefixSum([1, 2, 3]).get_sum(1, 2)
        5
        """
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """
        Return True if some contiguous subarray sums to target_sum.

        >>> PrefixSum([1, 2, 3]).contains_sum(5)
        True
        >>> PrefixSum([1, 2, 3]).contains_sum(7)
        False
        """
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
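    # Hypothetical usage sketch (not in the original file):
    ps = PrefixSum([1, 2, 3, 4])
    print(ps.get_sum(1, 3))  # 9  (2 + 3 + 4)
    print(ps.contains_sum(7))  # True (3 + 4)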
| 4 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
MAPPING_ENCODER_48K = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
MAPPING_48K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
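# Illustrative behavior of `should_ignore` (added note; these example keys and
# patterns are hypothetical, not taken from an actual checkpoint):
#   should_ignore("decoder.model.0.conv.conv", ["decoder.*"])  -> True  (prefix wildcard)
#   should_ignore("encoder.model.13.lstm", ["model.*.lstm"])   -> True  (infix wildcard)
#   should_ignore("encoder.model.13.lstm", [])                 -> False (nothing ignored)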
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
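# Example invocation (a sketch added for illustration; the script filename is
# assumed, and the checkpoint filename matches the download links listed at the
# top of this script):
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz_converted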
| 177 |
'''simple docstring'''
def get_demo_graph( index : int ) -> dict[int, list[int]]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges( graph : dict[int, list[int]] ) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph )  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at : int , parent : int , bridges : list[tuple[int, int]] , id_ : int ):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to , at , bridges , id_ )
                low[at] = min(low[at] , low[to] )
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at) )
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at] , low[to] )

    bridges: list[tuple[int, int]] = []
    for i in range(n ):
        if not visited[i]:
            dfs(i , -1 , bridges , id_ )
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
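# Illustrative usage (added; not part of the original module). On demo graph 0 the
# bridges are exactly {(2, 3), (2, 5), (3, 4)}: removing any of these edges
# disconnects the graph, while edges inside the two cycles are never bridges.
#
#   bridges = compute_bridges(get_demo_graph(0))
#   assert sorted(bridges) == [(2, 3), (2, 5), (3, 4)]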
| 4 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
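# Minimal usage sketch (added for illustration): `attribute_map` above routes the
# common `hidden_size` / `num_attention_heads` names to the DETR-style fields.
#
#   config = TableTransformerConfig()
#   assert config.hidden_size == config.d_model == 256
#   assert config.num_attention_heads == config.encoder_attention_heads == 8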
| 217 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith('module.encoder'):
            key = key.replace('module.encoder', 'glpn.encoder')
        if key.startswith('module.decoder'):
            key = key.replace('module.decoder', 'decoder.stages')
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed') + len('patch_embed')]
            key = key.replace(f'patch_embed{idx}', f'patch_embeddings.{int(idx)-1}')
        if "norm" in key:
            key = key.replace('norm', 'layer_norm')
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('glpn.encoder.layer_norm') + len('glpn.encoder.layer_norm')]
            key = key.replace(f'layer_norm{idx}', f'layer_norm.{int(idx)-1}')
        if "layer_norm1" in key:
            key = key.replace('layer_norm1', 'layer_norm_1')
        if "layer_norm2" in key:
            key = key.replace('layer_norm2', 'layer_norm_2')
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('block') + len('block')]
            key = key.replace(f'block{idx}', f'block.{int(idx)-1}')
        if "attn.q" in key:
            key = key.replace('attn.q', 'attention.self.query')
        if "attn.proj" in key:
            key = key.replace('attn.proj', 'attention.output.dense')
        if "attn" in key:
            key = key.replace('attn', 'attention.self')
        if "fc1" in key:
            key = key.replace('fc1', 'dense1')
        if "fc2" in key:
            key = key.replace('fc2', 'dense2')
        if "linear_pred" in key:
            key = key.replace('linear_pred', 'classifier')
        if "linear_fuse" in key:
            key = key.replace('linear_fuse.conv', 'linear_fuse')
            key = key.replace('linear_fuse.bn', 'batch_norm')
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c') + len('linear_c')]
            key = key.replace(f'linear_c{idx}', f'linear_c.{int(idx)-1}')
        if "bot_conv" in key:
            key = key.replace('bot_conv', '0.convolution')
        if "skip_conv1" in key:
            key = key.replace('skip_conv1', '1.convolution')
        if "skip_conv2" in key:
            key = key.replace('skip_conv2', '2.convolution')
        if "fusion1" in key:
            key = key.replace('fusion1', '1.fusion')
        if "fusion2" in key:
            key = key.replace('fusion2', '2.fusion')
        if "fusion3" in key:
            key = key.replace('fusion3', '3.fusion')
        if "fusion" in key and "conv" in key:
            key = key.replace('conv', 'convolutional_layer')
        if key.startswith('module.last_layer_depth'):
            key = key.replace('module.last_layer_depth', 'head.head')
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f'glpn.encoder.block.{i}.{j}.attention.self.kv.weight')
            kv_bias = state_dict.pop(f'glpn.encoder.block.{i}.{j}.attention.self.kv.bias')
            # next, add keys and values (in that order) to the state dict
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.key.weight'] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.key.bias'] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.value.weight'] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.value.bias'] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    # define GLPN configuration
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values

    logger.info('Converting model...')

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
        else:
            raise ValueError(f'Unknown model name: {model_name}')

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print('Looks ok!')

    # finally, push to hub if required
    if push_to_hub:
        logger.info('Pushing model and image processor to the hub...')
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization='nielsr',
            commit_message='Add model',
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization='nielsr',
            commit_message='Add image processor',
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
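# Example invocation (a sketch added for illustration; the script filename and the
# .pth path are assumptions):
#
#   python convert_glpn_to_pytorch.py \
#       --checkpoint_path ./glpn_kitti.pth \
#       --model_name glpn-kitti \
#       --push_to_hub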
| 4 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
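# Illustrative effect of the lazy module above (added note): a statement such as
# `from transformers import LiltModel` only materializes `modeling_lilt` on first
# attribute access, so the heavy torch-dependent module is imported only when it
# is actually needed.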
| 332 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
@slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained('xlm-roberta-base')
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
        #  xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        #  xlmr.eval()
        #  expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['last_hidden_state'].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
@slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained('xlm-roberta-large')
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] )
        #  xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        #  xlmr.eval()
        #  expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['last_hidden_state'].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
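# These integration tests are gated behind the `@slow` decorator; in a transformers
# checkout they would typically be run with something like (illustrative command,
# the test-file path is an assumption):
#
#   RUN_SLOW=1 pytest tests/models/xlm_roberta/test_modeling_xlm_roberta.py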
| 4 | 0 |
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."], )
    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
@slow
    def test_tokenizer_integration(self):
        # fmt: off
_UpperCAmelCase = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCAmelCase, model_name='''albert-base-v2''', revision='''6b6560eaf5ff2e250b00c50f380c5389a9c2d82e''', )
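# Added note on the layout exercised by `test_sequence_builders` above: ALBERT
# encodes a single sequence as [CLS] A [SEP] and a pair as [CLS] A [SEP] B [SEP],
# which is exactly what the two asserts check, token id by token id.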
| 260 |
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output['input_ids'] = tokenizer(example['content'], truncation=False)['input_ids']
    output['ratio_char_token'] = len(example['content']) / len(output['input_ids'])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
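# Added note: `ratio_char_token` stores characters per token; e.g. a 1200-character
# file that tokenizes into 400 ids yields a ratio of 3.0 (numbers here are purely
# illustrative). The column is handy for filtering files the tokenizer encodes
# inefficiently.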
| 4 | 0 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str) -> MaskFormerConfig:
    backbone_config = SwinConfig.from_pretrained(
        'microsoft/swin-tiny-patch4-window7-224', out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = 'huggingface/label-files'
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = 'maskformer-ade20k-full-id2label.json'
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = 'ade20k-id2label.json'
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = 'maskformer-coco-stuff-id2label.json'
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = 'coco-panoptic-id2label.json'
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = 'cityscapes-id2label.json'
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = 'mapillary-vistas-id2label.json'

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.attn.proj.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.norm2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.layers.{i}.downsample.reduction.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.layers.{i}.downsample.norm.weight", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.layers.{i}.downsample.norm.bias", F"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias") )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F"sem_seg_head.adapter_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight") )
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight") )
rename_keys.append((F"sem_seg_head.adapter_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.weight", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight") )
rename_keys.append((F"sem_seg_head.layer_{source_index}.norm.bias", F"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias") )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias") )
# cross-attention out projection
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias") )
# MLP 1
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", F"model.transformer_module.decoder.layers.{idx}.fc1.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", F"model.transformer_module.decoder.layers.{idx}.fc1.bias") )
# MLP 2
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", F"model.transformer_module.decoder.layers.{idx}.fc2.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", F"model.transformer_module.decoder.layers.{idx}.fc2.bias") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", F"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", F"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias") )
# layernorm 3 (final layernorm)
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight") )
rename_keys.append((F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", F"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias") )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.weight", F"mask_embedder.{i}.0.weight") )
rename_keys.append((F"sem_seg_head.predictor.mask_embed.layers.{i}.bias", F"mask_embedder.{i}.0.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(F"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[: dim]
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim :, :]
            state_dict[F"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[: hidden_size, :]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size :, :]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(F"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[: hidden_size, :]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size :, :]
        state_dict[F"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size :]
    # fmt: on
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, 'rb') as f:
        data = pickle.load(f)
    state_dict = data['model']

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, F"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if 'ade' in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors='pt')

    outputs = model(**inputs)

    print('Logits:', outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
    assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        print(F"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print('Pushing model and image processor to the hub...')
        model.push_to_hub(F"nielsr/{model_name}")
        image_processor.push_to_hub(F"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        """--model_name""",
        default="""maskformer-swin-tiny-ade""",
        type=str,
        help="""Name of the MaskFormer model you'd like to convert""",
    )
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
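# Example invocation (a sketch added for illustration; the script filename and the
# local paths are assumptions):
#
#   python convert_maskformer_swin_to_pytorch.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path ./MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer_converted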
| 73 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case =logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to use SortishSampler or not.'})
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'})
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        }, )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        }, )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        }, )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
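# Minimal usage sketch (added for illustration; `output_dir` is a hypothetical path):
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="./out",
#       predict_with_generate=True,   # decode with `generate` during evaluation
#       generation_max_length=128,
#       generation_num_beams=4,
#   )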
| 4 | 0 |
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class OffloadTester(unittest.TestCase ):
    def test_offload_state_dict( self ):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , model.state_dict() )
            index_file = os.path.join(tmp_dir , "index.json" )
            self.assertTrue(os.path.isfile(index_file ) )
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir , f"{key}.dat" )
                self.assertTrue(os.path.isfile(weight_file ) )
                # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight( self ):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2 , 3 , dtype=dtype )
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight , "weight" , tmp_dir , {} )
                weight_file = os.path.join(tmp_dir , "weight.dat" )
                self.assertTrue(os.path.isfile(weight_file ) )
                self.assertDictEqual(index , {"weight": {"shape": [2, 3], "dtype": str(dtype ).split("." )[1]}} )
                new_weight = load_offloaded_weight(weight_file , index["weight"] )
                self.assertTrue(torch.equal(weight , new_weight ) )

    def test_offload_weights_loader( self ):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , disk_part )
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )
        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , disk_part )
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , state_dict )
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )

    def test_extract_submodules_state_dict( self ):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict , ["a.1", "a.2"] )
        self.assertDictEqual(extracted , {"a.1": 0, "a.2": 2} )
        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict , ["a.1", "a.2"] )
        self.assertDictEqual(extracted , {"a.1.a": 0, "a.2.a": 2} )
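# Illustrative sketch (assumed, not accelerate's actual internals): the offload
# utilities tested above boil down to dumping a tensor's raw bytes into a .dat
# file plus a small metadata dict, then reopening the file as a memory-map so
# the weight never has to stay resident in RAM.
import numpy as np

def save_offloaded_sketch(tensor: torch.Tensor, path: str) -> dict:
    array = tensor.cpu().numpy()
    array.tofile(path)  # raw bytes, no header
    return {"shape": list(array.shape), "dtype": str(array.dtype)}

def load_offloaded_sketch(path: str, meta: dict) -> torch.Tensor:
    memmap = np.memmap(path, dtype=meta["dtype"], mode="r", shape=tuple(meta["shape"]))
    return torch.from_numpy(np.array(memmap))  # copy back into a regular tensor

def _demo_offload_round_trip() -> None:
    with TemporaryDirectory() as tmp_dir:
        weight = torch.randn(2, 3)
        meta = save_offloaded_sketch(weight, os.path.join(tmp_dir, "weight.dat"))
        restored = load_offloaded_sketch(os.path.join(tmp_dir, "weight.dat"), meta)
        assert torch.equal(weight, restored)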
| 252 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def should_ignore( name , ignore_keys ):
    for key in ignore_keys:
        if key.endswith('.*' ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix, suffix = key.split('.*.' )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights( orig_dict , hf_model , model_name ):
    unused_weights = []
    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f'''Unsupported model: {model_name}''' )
    for name, value in orig_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(f'''{name} was ignored''' )
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split('.*.' )
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith('embed' ) and name.endswith('embed_avg' ):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key )[0].split('.' )[-2]
                    mapped_key = mapped_key.replace('*' , layer_index )
                if "weight_g" in name:
                    weight_type = 'weight_g'
                elif "weight_v" in name:
                    weight_type = 'weight_v'
                elif "weight_ih_l0" in name:
                    weight_type = 'weight_ih_l0'
                elif "weight_hh_l0" in name:
                    weight_type = 'weight_hh_l0'
                elif "bias_ih_l0" in name:
                    weight_type = 'bias_ih_l0'
                elif "bias_hh_l0" in name:
                    weight_type = 'bias_hh_l0'
                elif "weight_ih_l1" in name:
                    weight_type = 'weight_ih_l1'
                elif "weight_hh_l1" in name:
                    weight_type = 'weight_hh_l1'
                elif "bias_ih_l1" in name:
                    weight_type = 'bias_ih_l1'
                elif "bias_hh_l1" in name:
                    weight_type = 'bias_hh_l1'
                elif "bias" in name:
                    weight_type = 'bias'
                elif "weight" in name:
                    weight_type = 'weight'
                elif "running_mean" in name:
                    weight_type = 'running_mean'
                elif "running_var" in name:
                    weight_type = 'running_var'
                elif "num_batches_tracked" in name:
                    weight_type = 'num_batches_tracked'
                else:
                    weight_type = None
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def convert_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path )
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = 'time_group_norm'
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f'''Unknown model name: {model_name}''' )
    model = EncodecModel(config )
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
    original_checkpoint = torch.load(checkpoint_path )
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint['best_state']
    recursively_load_weights(original_checkpoint , model , model_name )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('Pushing to the hub...' )
        feature_extractor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
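# Illustrative helper (assumed name, not part of the original script): how the
# ".*." wildcard entries in the rename tables above are matched, with the
# wildcard standing in for a layer index.
def matches_wildcard(pattern: str, name: str) -> bool:
    if ".*." not in pattern:
        return pattern in name
    prefix, suffix = pattern.split(".*.")
    return prefix in name and suffix in name

assert matches_wildcard("quantizer.vq.layers.*._codebook.embed", "quantizer.vq.layers.3._codebook.embed")
assert not matches_wildcard("quantizer.vq.layers.*._codebook.embed", "encoder.model.0.conv.conv")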
| 4 | 0 |
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    def __init__( self , data ):
        """simple docstring"""
        self.data = data
# Initialize hash values
        self.hashes = [
0x6a09_e667,
0xbb67_ae85,
0x3c6e_f372,
0xa54f_f53a,
0x510e_527f,
0x9b05_688c,
0x1f83_d9ab,
0x5be0_cd19,
]
# Initialize round constants
        self.round_constants = [
0x428a_2f98,
0x7137_4491,
0xb5c0_fbcf,
0xe9b5_dba5,
0x3956_c25b,
0x59f1_11f1,
0x923f_82a4,
0xab1c_5ed5,
0xd807_aa98,
0x1283_5b01,
0x2431_85be,
0x550c_7dc3,
0x72be_5d74,
0x80de_b1fe,
0x9bdc_06a7,
0xc19b_f174,
0xe49b_69c1,
0xefbe_4786,
0x0fc1_9dc6,
0x240c_a1cc,
0x2de9_2c6f,
0x4a74_84aa,
0x5cb0_a9dc,
0x76f9_88da,
0x983e_5152,
0xa831_c66d,
0xb003_27c8,
0xbf59_7fc7,
0xc6e0_0bf3,
0xd5a7_9147,
0x06ca_6351,
0x1429_2967,
0x27b7_0a85,
0x2e1b_2138,
0x4d2c_6dfc,
0x5338_0d13,
0x650a_7354,
0x766a_0abb,
0x81c2_c92e,
0x9272_2c85,
0xa2bf_e8a1,
0xa81a_664b,
0xc24b_8b70,
0xc76c_51a3,
0xd192_e819,
0xd699_0624,
0xf40e_3585,
0x106a_a070,
0x19a4_c116,
0x1e37_6c08,
0x2748_774c,
0x34b0_bcb5,
0x391c_0cb3,
0x4ed8_aa4a,
0x5b9c_ca4f,
0x682e_6ff3,
0x748f_82ee,
0x78a5_636f,
0x84c8_7814,
0x8cc7_0208,
0x90be_fffa,
0xa450_6ceb,
0xbef9_a3f7,
0xc671_78f2,
]
        self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
    @staticmethod
    def preprocessing( data ):
        """simple docstring"""
        padding = b"\x80" + (b"\x00" * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack(">Q" , (len(data ) * 8) )
        return data + padding + big_endian_integer
    def final_hash( self ):
        """simple docstring"""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 , len(self.preprocessed_data ) , 64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L" , block ) )
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0 , 64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_0000_0000
                # Compression (temp1 and temp2 must stay distinct)
                s1 = self.ror(e , 6 ) ^ self.ror(e , 11 ) ^ self.ror(e , 25 )
                ch = (e & f) ^ ((~e & 0xffff_ffff) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_0000_0000
                s0 = self.ror(a , 2 ) ^ self.ror(a , 13 ) ^ self.ror(a , 22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_0000_0000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_0000_0000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_0000_0000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_0000_0000)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = "".join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror( self , value , rotations ):
        """simple docstring"""
        return 0xffff_ffff & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase ):
    def test_match_hashes( self ):
        """simple docstring"""
        import hashlib
        msg = bytes("Test String" , "utf-8" )
        self.assertEqual(SHA256(msg ).hash , hashlib.sha256(msg ).hexdigest() )
def main():
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s" , "--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
    parser.add_argument(
        "-f" , "--file" , dest="input_file" , help="Hash contents of a file" )
    args = parser.parse_args()
    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , "rb" ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input , "utf-8" )
    print(SHA256(hash_input ).hash )
if __name__ == "__main__":
main()
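    # Known-answer spot check (standard NIST test vector for SHA-256 of b"abc");
    # this assumes the reconstructed class above.
    assert (
        SHA256(b"abc" ).hash
        == "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"
    )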
| 148 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)
class FlavaFeatureExtractor( FlavaImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 4 | 0 |
"""simple docstring"""
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator , batch_size = 16 ):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
    datasets = load_dataset("glue" , "mrpc" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="longest" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="pt" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size , drop_last=True )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE , drop_last=(accelerator.mixed_precision == "fp8") , )
    return train_dataloader, eval_dataloader
def training_function( config , args ):
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    metric = evaluate.load("glue" , "mrpc" )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:" , eval_metric )
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
        "--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU." , )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
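    # Illustrative sketch (not part of the original script): the loss above is
    # divided by gradient_accumulation_steps and the optimizer only steps every
    # N micro-batches, which averages gradients exactly as one batch N times
    # larger would. Plain PyTorch, no accelerate, for clarity; call
    # demo_gradient_accumulation() to try it.
    def demo_gradient_accumulation():
        model = torch.nn.Linear(4 , 1 )
        optimizer = AdamW(params=model.parameters() , lr=1e-2 )
        accum_steps = 4
        for step in range(8 ):
            x, y = torch.randn(2 , 4 ), torch.randn(2 , 1 )
            loss = torch.nn.functional.mse_loss(model(x ) , y ) / accum_steps
            loss.backward()  # gradients add up across micro-batches
            if (step + 1) % accum_steps == 0:
                optimizer.step()
                optimizer.zero_grad()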
| 57 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/blenderbot_small-90M""": 512,
}
class BlenderbotSmallTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__( self , vocab_file=None , merges_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
| 4 | 0 |
def get_demo_graph( index ):
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges( graph : dict[int, list[int]] ) -> list[tuple[int, int]]:
    """simple docstring"""
    id_ = 0
    n = len(graph )  # No of vertices in graph
    low = [0] * n
    visited = [False] * n
    def dfs(at , parent , bridges , id_ ):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to , at , bridges , id_ )
                low[at] = min(low[at] , low[to] )
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at) )
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at] , low[to] )
    bridges = []
    for i in range(n ):
        if not visited[i]:
            dfs(i , -1 , bridges , id_ )
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
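    # Quick sanity checks on the routines above: in graph 0 the chain 2-3-4 and
    # the link 2-5 are exactly the edges whose removal disconnects the graph,
    # while graph 3 is 2-edge-connected and therefore bridge-free.
    assert sorted(compute_bridges(get_demo_graph(0 ) ) ) == [(2, 3), (2, 5), (3, 4)]
    assert compute_bridges(get_demo_graph(3 ) ) == []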
| 340 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config( PretrainedConfig ):
    model_type = 'speech_to_text_2'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self , vocab_size=10000 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=4 , decoder_layerdrop=0.0 , use_cache=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_target_positions=1024 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 4 | 0 |
def gnome_sort( lst : list ) -> list:
    '''simple docstring'''
    if len(lst ) <= 1:
        return lst
    i = 1
    while i < len(lst ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(gnome_sort(unsorted))
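    # Quick checks: gnome sort is O(n^2) in the worst case but O(n) on input
    # that is already sorted, since the index only ever walks forward then.
    assert gnome_sort([5, 3, 1, 4, 2] ) == [1, 2, 3, 4, 5]
    assert gnome_sort([] ) == []
    assert gnome_sort([1, 2, 3] ) == [1, 2, 3]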
| 88 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack( list ):
    def __lt__( self , other ):
        return self[-1] < other[-1]
    def __eq__( self , other ):
        return self[-1] == other[-1]
def patience_sort( collection : list ) -> list:
    stacks = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element] )
        i = bisect_left(stacks , new_stack )
        if i != len(stacks ):
            stacks[i].append(element )
        else:
            stacks.append(new_stack )
    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack ) for stack in stacks) )
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted))
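    # Why Stack compares only its last element: the piles stay ordered by their
    # top card, so bisect_left finds the leftmost pile whose top is >= the
    # incoming element in O(log k); heapq.merge then interleaves the reversed
    # (hence ascending) piles in O(n log k) overall.
    assert patience_sort([5, 1, 4, 2, 3] ) == [1, 2, 3, 4, 5]
    assert patience_sort([] ) == []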
| 4 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
    'tokenizer_file': {
        'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mobilebert-uncased': 512}
PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer
    def __init__( self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        """simple docstring"""
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase', do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self, token_ids_a, token_ids_b=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self, token_ids_a, token_ids_b = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self, save_directory, filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix )
        return tuple(files )
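# How the BERT-style pair layout above plays out, shown without any tokenizer:
# segment ids cover [CLS] A [SEP] with 0s and B [SEP] with 1s. Token values are
# made-up placeholders; only the lengths matter.
cls, sep = ["[CLS]"], ["[SEP]"]
token_ids_a, token_ids_b = ["hello", "world"], ["hi"]
type_ids = len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
assert type_ids == [0, 0, 0, 0, 1, 1]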
| 201 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__snake_case ="""\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__snake_case ="""\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__snake_case ="""
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute( self , predictions , references , max_order=4 , smooth=False ):
        score = compute_bleu(
            reference_corpus=references , translation_corpus=predictions , max_order=max_order , smooth=smooth )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
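# The returned quantities fit together as BLEU = BP * exp(mean(log p_n)): the
# geometric mean of the n-gram precisions, damped by the brevity penalty
# BP = 1 if c > r else exp(1 - r/c). A tiny numeric illustration (made-up values):
import math

_precisions = [1.0, 1.0, 1.0, 1.0]  # perfect 1- to 4-gram matches
_translation_length, _reference_length = 7, 7
_geo_mean = math.exp(sum(math.log(p) for p in _precisions) / len(_precisions))
_bp = 1.0 if _translation_length > _reference_length else math.exp(
    1 - _reference_length / _translation_length
)
assert _geo_mean * _bp == 1.0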
| 4 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['keras_nlp']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['keras_nlp'] )
| 177 |
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__snake_case ="""\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
__snake_case ="""\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
__snake_case ="""
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy( preds , labels ):
    return float((preds == labels).mean() )
def acc_and_f1( preds , labels , f1_avg="binary" ):
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds , average=f1_avg ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc( ids_preds , labels ):
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = f'''{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}'''
        pred = id_pred['prediction']
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels )
        f1 = f1_score(y_true=question_labels , y_pred=question_preds , average='macro' )
        f1s.append(f1 )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    f1_m = float(sum(f1s ) / len(f1s ) )
    em = sum(ems ) / len(ems )
    f1_a = float(f1_score(y_true=labels , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue( datasets.Metric ):
    def _info( self ):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
    def _get_feature_types( self ):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
    def _compute( self , predictions , references ):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "cb":
            return acc_and_f1(predictions , references , f1_avg='macro' )
        elif self.config_name == "record":
            dataset = [
                {
                    'qas': [
                        {'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
            return evaluate_record(dataset , predictions )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions , references )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions , references )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
| 4 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__( self , row : int , column : int , default_value : float = 0 ) -> None:
        '''simple docstring'''
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column )] for r in range(row )]
    def __str__( self ) -> str:
        '''simple docstring'''
        s = f"Matrix consists of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length , len(str(obj )))
        string_format_identifier = f"%{max_element_length}s"
        # Make string and return
        def single_line(row_vector : list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line
        s += "\n".join(single_line(row_vector ) for row_vector in self.array)
        return s
def __repr__( self : List[str])-> str:
'''simple docstring'''
return str(self)
    def validate_indices( self , loc : tuple[int, int] ) -> bool:
        '''simple docstring'''
        if not (isinstance(loc , (list, tuple)) and len(loc ) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__( self , loc : tuple[int, int] ) -> Any:
        '''simple docstring'''
        assert self.validate_indices(loc )
        return self.array[loc[0]][loc[1]]
    def __setitem__( self , loc : tuple[int, int] , value : float ) -> None:
        '''simple docstring'''
        assert self.validate_indices(loc )
        self.array[loc[0]][loc[1]] = value
    def __add__( self , another : Matrix ) -> Matrix:
        '''simple docstring'''
        assert isinstance(another , Matrix )
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__( self ) -> Matrix:
        '''simple docstring'''
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = -self[r, c]
        return result
    def __sub__( self , another : Matrix ) -> Matrix:
        '''simple docstring'''
        return self + (-another)
    def __mul__( self , another : int | float | Matrix ) -> Matrix:
        '''simple docstring'''
        if isinstance(another , (int, float) ): # Scalar multiplication
            result = Matrix(self.row , self.column )
            for r in range(self.row ):
                for c in range(self.column ):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another , Matrix ): # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row , another.column )
            for r in range(self.row ):
                for c in range(another.column ):
                    for i in range(self.column ):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another )})"
            raise TypeError(msg )
    def transpose( self ) -> Matrix:
        '''simple docstring'''
        result = Matrix(self.column , self.row )
        for r in range(self.row ):
            for c in range(self.column ):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison( self , u : Matrix , v : Matrix ) -> Any:
        '''simple docstring'''
        assert isinstance(u , Matrix ) and isinstance(v , Matrix )
        assert self.row == self.column == u.row == v.row # u, v should be column vector
        assert u.column == v.column == 1 # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def test_sherman_morrison() -> None:
        # a^(-1)
        ainv = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}" )
        # u, v
        u = Matrix(3 , 1 , 0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3 , 1 , 0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}" )
        print(f"v is {v}" )
        print(f"uv^T is {u * v.transpose()}" )
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}" )
    def run_all_tests() -> None:
        import doctest
        doctest.testmod()
        test_sherman_morrison()
    run_all_tests()
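    # Illustrative cross-check of the Sherman-Morrison identity using the class
    # above: (A + u v^T) times the returned inverse should be the identity.
    # With ainv = I as in the test, A = I as well, so the product is computable
    # directly. Names here are local to this sketch.
    def verify_sherman_morrison() -> None:
        ainv = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            ainv[i, i] = 1
        u = Matrix(3 , 1 , 0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3 , 1 , 0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        product = (ainv + u * v.transpose()) * ainv.sherman_morrison(u , v )
        for r in range(3 ):
            for c in range(3 ):
                expected = 1.0 if r == c else 0.0
                assert abs(product[r, c] - expected ) < 1e-9
    verify_sherman_morrison()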
| 217 |
'''simple docstring'''
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
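# Why the one-liner above reproduces itself: the lambda receives its own source
# as a string and substitutes it into itself via the %r conversion, which
# re-escapes the quotes so the printed output is byte-for-byte the program.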
| 4 | 0 |
"""simple docstring"""
def jaro_winkler( stra : str , strb : str ) -> float:
    def get_matched_characters(_stra : str , _strb : str ) -> str:
        matched = []
        limit = min(len(_stra ) , len(_strb ) ) // 2
        for i, l in enumerate(_stra ):
            left = int(max(0 , i - limit ) )
            right = int(min(i + limit + 1 , len(_strb ) ) )
            if l in _strb[left:right]:
                matched.append(l )
                _strb = f'''{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}'''
        return "".join(matched )
    # matching characters
    matching_a = get_matched_characters(stra , strb )
    matching_b = get_matched_characters(strb , stra )
    match_count = len(matching_a )
    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a , matching_b ) if ca != cb] ) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra )
                + match_count / len(strb )
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4] , strb[:4] ):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
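    # Worked example: "martha" vs "marhta" has six matching characters and one
    # transposition (th/ht), so jaro = (1 + 1 + 5/6) / 3 = 17/18; the shared
    # 3-letter prefix lifts it to 17/18 + 0.1 * 3 * (1 - 17/18) = 17.3 / 18.
    assert abs(jaro_winkler('martha', 'marhta') - 17.3 / 18) < 1e-12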
| 332 |
'''simple docstring'''
import os
__snake_case ={"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1_000}
def a_ ( lowerCamelCase : str ):
lowerCAmelCase = 0
lowerCAmelCase = 0
while index < len(lowerCamelCase ) - 1:
lowerCAmelCase = SYMBOLS[numerals[index]]
lowerCAmelCase = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def a_ ( lowerCamelCase : int ):
lowerCAmelCase = ''
lowerCAmelCase = num // 1000
numerals += m_count * "M"
num %= 1000
lowerCAmelCase = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
lowerCAmelCase = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def a_ ( lowerCamelCase : str = "/p089_roman.txt" ):
lowerCAmelCase = 0
with open(os.path.dirname(lowerCamelCase ) + roman_numerals_filename ) as filea:
lowerCAmelCase = filea.readlines()
for line in lines:
lowerCAmelCase = line.strip()
lowerCAmelCase = parse_roman_numerals(lowerCamelCase )
lowerCAmelCase = generate_roman_numerals(lowerCamelCase )
savings += len(lowerCamelCase ) - len(lowerCamelCase )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
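# --- Illustrative sketch (not part of the original snippet) ---
# Round-trip check of the two helpers above: re-generating a parsed numeral
# yields the minimal form, which is never longer than the input.
for numeral in ("XIV", "MCMXC", "IIII"):
    value = parse_roman_numerals(numeral)
    minimal = generate_roman_numerals(value)
    assert parse_roman_numerals(minimal) == value
    assert len(minimal) <= len(numeral)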
| 4 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
__A : Any = logging.get_logger(__name__)
__A : List[Any] = {
"Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class _a ( __lowercase):
"""simple docstring"""
UpperCamelCase__ = '''dpt'''
def __init__( self : int , __UpperCamelCase : List[Any]=7_6_8 , __UpperCamelCase : Optional[Any]=1_2 , __UpperCamelCase : str=1_2 , __UpperCamelCase : List[str]=3_0_7_2 , __UpperCamelCase : Dict="gelu" , __UpperCamelCase : Optional[Any]=0.0 , __UpperCamelCase : Dict=0.0 , __UpperCamelCase : int=0.0_2 , __UpperCamelCase : List[Any]=1e-12 , __UpperCamelCase : List[str]=3_8_4 , __UpperCamelCase : int=1_6 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : List[str]=False , __UpperCamelCase : str=True , __UpperCamelCase : str=[2, 5, 8, 1_1] , __UpperCamelCase : Union[str, Any]="project" , __UpperCamelCase : List[Any]=[4, 2, 1, 0.5] , __UpperCamelCase : List[Any]=[9_6, 1_9_2, 3_8_4, 7_6_8] , __UpperCamelCase : Any=2_5_6 , __UpperCamelCase : Optional[Any]=-1 , __UpperCamelCase : List[str]=False , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Dict=0.4 , __UpperCamelCase : Any=2_5_5 , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : Dict=[1, 1_0_2_4, 2_4, 2_4] , __UpperCamelCase : Any=[0, 1] , __UpperCamelCase : Dict=None , **__UpperCamelCase : str , )->int:
super().__init__(**UpperCAmelCase__ )
_UpperCAmelCase = hidden_size
_UpperCAmelCase = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
_UpperCAmelCase = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
_UpperCAmelCase = BitConfig(**UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
_UpperCAmelCase = BitConfig(**UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_UpperCAmelCase = backbone_config
else:
raise ValueError(
F'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' )
_UpperCAmelCase = backbone_featmap_shape
_UpperCAmelCase = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = []
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
_UpperCAmelCase = readout_type
_UpperCAmelCase = reassemble_factors
_UpperCAmelCase = neck_hidden_sizes
_UpperCAmelCase = fusion_hidden_size
_UpperCAmelCase = head_in_index
_UpperCAmelCase = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = semantic_loss_ignore_index
_UpperCAmelCase = semantic_classifier_dropout
def lowercase__ ( self : str )->Dict:
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
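# --- Illustrative sketch (not part of the original snippet) ---
# Minimal use of the config class above, assuming it is exported as `DPTConfig`
# (as in the upstream `transformers` package):
from transformers import DPTConfig

plain = DPTConfig()                 # ViT-style backbone; is_hybrid defaults to False
hybrid = DPTConfig(is_hybrid=True)  # falls back to the default BiT backbone config
assert hybrid.backbone_config is not None
assert isinstance(hybrid.to_dict()["backbone_config"], dict)  # serialized as a dict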
| 260 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__snake_case =random.Random()
if is_torch_available():
import torch
def a_ ( lowerCamelCase : Dict , lowerCamelCase : Dict=1.0 , lowerCamelCase : List[Any]=None , lowerCamelCase : Union[str, Any]=None ):
if rng is None:
lowerCAmelCase = global_rng
lowerCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase_ ( unittest.TestCase ):
def __init__( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str]=7 , UpperCAmelCase__ : int=4_0_0 , UpperCAmelCase__ : int=2_0_0_0 , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple=1_6_0_0_0 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Union[str, Any]=True , ) -> Any:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = min_seq_length
lowerCAmelCase = max_seq_length
lowerCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase = feature_size
lowerCAmelCase = padding_value
lowerCAmelCase = sampling_rate
lowerCAmelCase = return_attention_mask
lowerCAmelCase = do_normalize
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Union[str, Any]=False ) -> Optional[Any]:
def _flatten(UpperCAmelCase__ : int ):
return list(itertools.chain(*UpperCAmelCase__ ) )
if equal_length:
lowerCAmelCase = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCAmelCase = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase = [np.asarray(UpperCAmelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
lowerCamelCase : Dict = ASTFeatureExtractor
def __UpperCAmelCase ( self : str ) -> Optional[int]:
lowerCAmelCase = ASTFeatureExtractionTester(self )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase = [np.asarray(UpperCAmelCase__ ) for speech_input in speech_inputs]
# Test not batched input
lowerCAmelCase = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test batched
lowerCAmelCase = feat_extract(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
lowerCAmelCase = np.asarray(UpperCAmelCase__ )
lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
import torch
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = np.random.rand(1_0_0 ).astype(np.floataa )
lowerCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : str ) -> Tuple:
from datasets import load_dataset
lowerCAmelCase = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
lowerCAmelCase = ds.sort('id' ).select(range(UpperCAmelCase__ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
@require_torch
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
# fmt: off
lowerCAmelCase = torch.tensor(
[-0.9_894, -1.2_776, -0.9_066, -1.2_776, -0.9_349, -1.2_609, -1.0_386, -1.2_776,
-1.1_561, -1.2_776, -1.2_052, -1.2_723, -1.2_190, -1.2_132, -1.2_776, -1.1_133,
-1.1_953, -1.1_343, -1.1_584, -1.2_203, -1.1_770, -1.2_474, -1.2_381, -1.1_936,
-0.9_270, -0.8_317, -0.8_049, -0.7_706, -0.7_565, -0.7_869] )
# fmt: on
lowerCAmelCase = self._load_datasamples(1 )
lowerCAmelCase = ASTFeatureExtractor()
lowerCAmelCase = feature_extractor(UpperCAmelCase__ , return_tensors='pt' ).input_values
self.assertEquals(input_values.shape , (1, 1_0_2_4, 1_2_8) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , UpperCAmelCase__ , atol=1E-4 ) )
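# --- Illustrative sketch (not part of the original snippet) ---
# Stand-alone use of the extractor exercised above: a one-second 16 kHz
# waveform is padded/truncated to 1024 mel frames of 128 bins. Assumes
# `torch`/`torchaudio` are installed, since the extractor computes filter
# banks through torchaudio.
import numpy as np
from transformers import ASTFeatureExtractor

waveform = np.random.rand(16_000).astype(np.float32)
features = ASTFeatureExtractor()(waveform, sampling_rate=16_000, return_tensors="np")
assert features.input_values.shape == (1, 1_024, 128)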
| 4 | 0 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
a =logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
def run_func(lowerCamelCase__ ):
@wraps(lowerCamelCase__ )
def run_in_eager_mode(*lowerCamelCase__ , **lowerCamelCase__ ):
return func(*lowerCamelCase__ , **lowerCamelCase__ )
@wraps(lowerCamelCase__ )
@tf.function(experimental_compile=lowerCamelCase__ )
def run_in_graph_mode(*lowerCamelCase__ , **lowerCamelCase__ ):
return func(*lowerCamelCase__ , **lowerCamelCase__ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
'Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.' )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
__lowerCamelCase : Union[str, Any] = random.Random()
__lowerCamelCase : List[str] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(lowerCamelCase__ , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class A_ ( __lowercase ):
_UpperCAmelCase : TensorFlowBenchmarkArguments
_UpperCAmelCase : PretrainedConfig
_UpperCAmelCase : str = "TensorFlow"
@property
def lowerCAmelCase ( self : int):
return tf.__version__
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int):
# initialize GPU on separate process
__lowerCamelCase : Union[str, Any] = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.')
__lowerCamelCase : Optional[int] = self._prepare_inference_func(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__)
return self._measure_speed(_inference)
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int):
__lowerCamelCase : List[str] = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.')
__lowerCamelCase : List[Any] = self._prepare_train_func(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__)
return self._measure_speed(_train)
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int):
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] ,UpperCAmelCase__)
__lowerCamelCase : Optional[int] = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.')
__lowerCamelCase : List[Any] = self._prepare_inference_func(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__)
return self._measure_memory(_inference)
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int):
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] ,UpperCAmelCase__)
__lowerCamelCase : str = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.')
__lowerCamelCase : List[Any] = self._prepare_train_func(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__)
return self._measure_memory(_train)
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int):
__lowerCamelCase : str = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.')
__lowerCamelCase : Tuple = (
hasattr(UpperCAmelCase__ ,'architectures')
and isinstance(config.architectures ,UpperCAmelCase__)
and len(config.architectures) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__lowerCamelCase : Optional[int] = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
__lowerCamelCase : str = __import__('transformers' ,fromlist=[model_class])
__lowerCamelCase : List[Any] = getattr(UpperCAmelCase__ ,UpperCAmelCase__)
__lowerCamelCase : List[Any] = model_cls(UpperCAmelCase__)
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.')
else:
__lowerCamelCase : Dict = TF_MODEL_MAPPING[config.__class__](UpperCAmelCase__)
# encoder-decoder has vocab size saved differently
__lowerCamelCase : Any = config.vocab_size if hasattr(UpperCAmelCase__ ,'vocab_size') else config.encoder.vocab_size
__lowerCamelCase : List[str] = random_input_ids(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__)
@run_with_tf_optimizations(self.args.eager_mode ,self.args.use_xla)
def encoder_decoder_forward():
return model(UpperCAmelCase__ ,decoder_input_ids=UpperCAmelCase__ ,training=UpperCAmelCase__)
@run_with_tf_optimizations(self.args.eager_mode ,self.args.use_xla)
def encoder_forward():
return model(UpperCAmelCase__ ,training=UpperCAmelCase__)
__lowerCamelCase : List[str] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int):
__lowerCamelCase : Optional[int] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.')
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.')
__lowerCamelCase : str = (
hasattr(UpperCAmelCase__ ,'architectures')
and isinstance(config.architectures ,UpperCAmelCase__)
and len(config.architectures) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
__lowerCamelCase : Any = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
__lowerCamelCase : List[str] = __import__('transformers' ,fromlist=[model_class])
__lowerCamelCase : str = getattr(UpperCAmelCase__ ,UpperCAmelCase__)
__lowerCamelCase : Optional[Any] = model_cls(UpperCAmelCase__)
except ImportError:
raise ImportError(
F"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.')
else:
__lowerCamelCase : List[str] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](UpperCAmelCase__)
# encoder-decoder has vocab size saved differently
__lowerCamelCase : str = config.vocab_size if hasattr(UpperCAmelCase__ ,'vocab_size') else config.encoder.vocab_size
__lowerCamelCase : Optional[Any] = random_input_ids(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__)
@run_with_tf_optimizations(self.args.eager_mode ,self.args.use_xla)
def encoder_decoder_train():
__lowerCamelCase : List[Any] = model(UpperCAmelCase__ ,decoder_input_ids=UpperCAmelCase__ ,labels=UpperCAmelCase__ ,training=UpperCAmelCase__)[0]
__lowerCamelCase : List[str] = tf.gradients(UpperCAmelCase__ ,model.trainable_variables)
return gradients
@run_with_tf_optimizations(self.args.eager_mode ,self.args.use_xla)
def encoder_train():
__lowerCamelCase : Dict = model(UpperCAmelCase__ ,labels=UpperCAmelCase__ ,training=UpperCAmelCase__)[0]
__lowerCamelCase : str = tf.gradients(UpperCAmelCase__ ,model.trainable_variables)
return gradients
__lowerCamelCase : Union[str, Any] = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run the model 5 additional times to stabilize compilation for tpu
logger.info('Do inference on TPU. Running model 5 times to stabilize compilation')
timeit.repeat(UpperCAmelCase__ ,repeat=1 ,number=5)
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
__lowerCamelCase : List[Any] = timeit.repeat(
UpperCAmelCase__ ,repeat=self.args.repeat ,number=1_0 ,)
return min(UpperCAmelCase__) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F"Doesn\'t fit on GPU. {e}")
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Callable[[], None]):
logger.info(
'Note that TensorFlow allocates more memory than '
'it might need to speed up computation. '
'The memory reported here corresponds to the memory '
'reported by `nvidia-smi`, which can vary depending '
'on total available memory on the GPU that is used.')
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'
' consumption line by line.')
__lowerCamelCase : Tuple = start_memory_tracing('transformers')
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'
' with `args.memory=False`')
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'py3nvml not installed, we won\'t log GPU memory usage. '
'Install py3nvml (pip install py3nvml) to log information about GPU.')
__lowerCamelCase : Union[str, Any] = 'N/A'
else:
logger.info(
'Measuring total GPU usage on GPU device. Make sure to not have additional processes'
' running on the same GPU.')
# init nvml
nvml.nvmlInit()
func()
__lowerCamelCase : Tuple = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
__lowerCamelCase : Tuple = nvml.nvmlDeviceGetMemoryInfo(UpperCAmelCase__)
__lowerCamelCase : Optional[int] = meminfo.used
__lowerCamelCase : Tuple = Memory(UpperCAmelCase__)
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'When enabling line by line tracing, the max peak memory for CPU is inaccurate in'
' TensorFlow.')
__lowerCamelCase : List[Any] = None
else:
__lowerCamelCase : Optional[Any] = measure_peak_memory_cpu(UpperCAmelCase__)
__lowerCamelCase : Optional[Any] = Memory(UpperCAmelCase__) if isinstance(UpperCAmelCase__ ,UpperCAmelCase__) else memory_bytes
if self.args.trace_memory_line_by_line:
__lowerCamelCase : Tuple = stop_memory_tracing(UpperCAmelCase__)
if memory is None:
__lowerCamelCase : Any = summary.total
else:
__lowerCamelCase : Union[str, Any] = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"Doesn\'t fit on GPU. {e}")
return "N/A", None
| 73 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : str ) -> List[Any]:
lowerCAmelCase = torch.nn.Linear(1_0 , 1_0 )
lowerCAmelCase = torch.optim.SGD(model.parameters() , 0.1 )
lowerCAmelCase = Accelerator()
lowerCAmelCase = accelerator.prepare(UpperCAmelCase__ )
try:
pickle.loads(pickle.dumps(UpperCAmelCase__ ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
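# --- Illustrative sketch (not part of the original snippet) ---
# The property the test asserts, outside unittest: an accelerate-prepared
# optimizer must survive a pickle round-trip (runs on plain CPU).
import pickle
import torch
from accelerate import Accelerator

model = torch.nn.Linear(10, 10)
optimizer = torch.optim.SGD(model.parameters(), 0.1)
optimizer = Accelerator().prepare(optimizer)
restored = pickle.loads(pickle.dumps(optimizer))
assert isinstance(restored, type(optimizer))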
| 4 | 0 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase : int = logging.get_logger(__name__)
set_seed(7_70)
UpperCAmelCase : Tuple = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
UpperCAmelCase : Optional[int] = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
UpperCAmelCase : int = os.path.dirname(os.path.abspath(__file__))
UpperCAmelCase : Union[str, Any] = os.path.join(os.path.expanduser("~"), ".cache")
UpperCAmelCase : Dict = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def __lowerCamelCase ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[Any]=False ):
'''simple docstring'''
lowerCamelCase = model_type
if use_small:
key += "_small"
return os.path.join(lowerCamelCase__ , REMOTE_MODEL_PATHS[key]["""file_name"""] )
def __lowerCamelCase ( lowerCamelCase__ : Any , lowerCamelCase__ : Union[str, Any] ):
'''simple docstring'''
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
hf_hub_download(repo_id=lowerCamelCase__ , filename=lowerCamelCase__ , local_dir=lowerCamelCase__ )
def __lowerCamelCase ( lowerCamelCase__ : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : Dict=False , lowerCamelCase__ : Optional[int]="text" ):
'''simple docstring'''
if model_type == "text":
lowerCamelCase = BarkSemanticModel
lowerCamelCase = BarkSemanticConfig
lowerCamelCase = BarkSemanticGenerationConfig
elif model_type == "coarse":
lowerCamelCase = BarkCoarseModel
lowerCamelCase = BarkCoarseConfig
lowerCamelCase = BarkCoarseGenerationConfig
elif model_type == "fine":
lowerCamelCase = BarkFineModel
lowerCamelCase = BarkFineConfig
lowerCamelCase = BarkFineGenerationConfig
else:
raise NotImplementedError()
lowerCamelCase = f'{model_type}_small' if use_small else model_type
lowerCamelCase = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(lowerCamelCase__ ):
logger.info(f'{model_type} model not found, downloading into `{CACHE_DIR}`.' )
_download(model_info["""repo_id"""] , model_info["""file_name"""] )
lowerCamelCase = torch.load(lowerCamelCase__ , map_location=lowerCamelCase__ )
# this is a hack
lowerCamelCase = checkpoint["""model_args"""]
if "input_vocab_size" not in model_args:
lowerCamelCase = model_args["""vocab_size"""]
lowerCamelCase = model_args["""vocab_size"""]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
lowerCamelCase = model_args.pop("""n_head""" )
lowerCamelCase = model_args.pop("""n_embd""" )
lowerCamelCase = model_args.pop("""n_layer""" )
lowerCamelCase = ConfigClass(**checkpoint["""model_args"""] )
lowerCamelCase = ModelClass(config=lowerCamelCase__ )
lowerCamelCase = GenerationConfigClass()
lowerCamelCase = model_generation_config
lowerCamelCase = checkpoint["""model"""]
# fixup checkpoint
lowerCamelCase = """_orig_mod."""
for k, v in list(state_dict.items() ):
if k.startswith(lowerCamelCase__ ):
# replace part of the key with corresponding layer name in HF implementation
lowerCamelCase = k[len(lowerCamelCase__ ) :]
for old_layer_name in new_layer_name_dict:
lowerCamelCase = new_k.replace(lowerCamelCase__ , new_layer_name_dict[old_layer_name] )
lowerCamelCase = state_dict.pop(lowerCamelCase__ )
lowerCamelCase = set(state_dict.keys() ) - set(model.state_dict().keys() )
lowerCamelCase = {k for k in extra_keys if not k.endswith(""".attn.bias""" )}
lowerCamelCase = set(model.state_dict().keys() ) - set(state_dict.keys() )
lowerCamelCase = {k for k in missing_keys if not k.endswith(""".attn.bias""" )}
if len(lowerCamelCase__ ) != 0:
raise ValueError(f'extra keys found: {extra_keys}' )
if len(lowerCamelCase__ ) != 0:
raise ValueError(f'missing keys: {missing_keys}' )
model.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ )
lowerCamelCase = model.num_parameters(exclude_embeddings=lowerCamelCase__ )
lowerCamelCase = checkpoint["""best_val_loss"""].item()
logger.info(f'model loaded: {round(n_params/1E6 , 1 )}M params, {round(lowerCamelCase__ , 3 )} loss' )
model.eval()
model.to(lowerCamelCase__ )
del checkpoint, state_dict
return model
def __lowerCamelCase ( lowerCamelCase__ : Tuple , lowerCamelCase__ : List[str]=False , lowerCamelCase__ : Union[str, Any]="text" ):
'''simple docstring'''
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
lowerCamelCase = """cpu""" # do conversion on cpu
lowerCamelCase = _get_ckpt_path(lowerCamelCase__ , use_small=lowerCamelCase__ )
lowerCamelCase = _load_model(lowerCamelCase__ , lowerCamelCase__ , model_type=lowerCamelCase__ , use_small=lowerCamelCase__ )
# load bark initial model
lowerCamelCase = _bark_load_model(lowerCamelCase__ , """cpu""" , model_type=lowerCamelCase__ , use_small=lowerCamelCase__ )
if model_type == "text":
lowerCamelCase = bark_model["""model"""]
if model.num_parameters(exclude_embeddings=lowerCamelCase__ ) != bark_model.get_num_params():
raise ValueError("""initial and new models don\'t have the same number of parameters""" )
# check if same output as the bark model
lowerCamelCase = 5
lowerCamelCase = 10
if model_type in ["text", "coarse"]:
lowerCamelCase = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
lowerCamelCase = bark_model(lowerCamelCase__ )[0]
lowerCamelCase = model(lowerCamelCase__ )
# take last logits
lowerCamelCase = output_new_model_total.logits[:, [-1], :]
else:
lowerCamelCase = 3
lowerCamelCase = 8
lowerCamelCase = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
lowerCamelCase = model(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase = bark_model(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("""initial and new outputs don\'t have the same shape""" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("""initial and new outputs are not equal""" )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
def __lowerCamelCase ( lowerCamelCase__ : Dict , lowerCamelCase__ : Tuple , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : str , ):
'''simple docstring'''
lowerCamelCase = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase = BarkSemanticConfig.from_pretrained(os.path.join(lowerCamelCase__ , """config.json""" ) )
lowerCamelCase = BarkCoarseConfig.from_pretrained(os.path.join(lowerCamelCase__ , """config.json""" ) )
lowerCamelCase = BarkFineConfig.from_pretrained(os.path.join(lowerCamelCase__ , """config.json""" ) )
lowerCamelCase = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" )
lowerCamelCase = BarkSemanticModel.from_pretrained(lowerCamelCase__ )
lowerCamelCase = BarkCoarseModel.from_pretrained(lowerCamelCase__ )
lowerCamelCase = BarkFineModel.from_pretrained(lowerCamelCase__ )
lowerCamelCase = EncodecModel.from_pretrained("""facebook/encodec_24khz""" )
lowerCamelCase = BarkConfig.from_sub_model_configs(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
lowerCamelCase = BarkModel(lowerCamelCase__ )
lowerCamelCase = semantic
lowerCamelCase = coarseAcoustic
lowerCamelCase = fineAcoustic
lowerCamelCase = codec
lowerCamelCase = bark_generation_config
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
bark.save_pretrained(lowerCamelCase__ , repo_id=lowerCamelCase__ , push_to_hub=lowerCamelCase__ )
if __name__ == "__main__":
UpperCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
UpperCAmelCase : Tuple = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
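# --- Illustrative sketch (not part of the original snippet) ---
# The state-dict key fix-up applied in `_load_model`, reduced to plain strings:
# strip the torch.compile prefix, then translate Bark layer names to HF names
# (only a subset of the mapping is reproduced here).
_prefix = "_orig_mod."
_name_map = {"transformer.": "", "h.": "layers.", "ln_f": "layernorm_final"}

def _remap_key(key: str) -> str:
    if key.startswith(_prefix):
        key = key[len(_prefix):]
    for old, new in _name_map.items():
        key = key.replace(old, new)
    return key

assert _remap_key("_orig_mod.transformer.h.0.ln_f.weight") == "layers.0.layernorm_final.weight"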
| 252 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
__snake_case ={
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
__snake_case ={"""facebook/blenderbot-3B""": 128}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : List[Any] = VOCAB_FILES_NAMES
lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = ['''input_ids''', '''attention_mask''']
lowerCamelCase : List[Any] = BlenderbotTokenizer
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : str="replace" , UpperCAmelCase__ : Dict="<s>" , UpperCAmelCase__ : Tuple="</s>" , UpperCAmelCase__ : Optional[Any]="</s>" , UpperCAmelCase__ : Any="<s>" , UpperCAmelCase__ : List[str]="<unk>" , UpperCAmelCase__ : int="<pad>" , UpperCAmelCase__ : Union[str, Any]="<mask>" , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Union[str, Any]=True , **UpperCAmelCase__ : Optional[int] , ) -> int:
super().__init__(
UpperCAmelCase__ , UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , errors=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ , **UpperCAmelCase__ , )
lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , UpperCAmelCase__ ) != add_prefix_space:
lowerCAmelCase = getattr(UpperCAmelCase__ , pre_tok_state.pop('type' ) )
lowerCAmelCase = add_prefix_space
lowerCAmelCase = pre_tok_class(**UpperCAmelCase__ )
lowerCAmelCase = add_prefix_space
lowerCAmelCase = 'post_processor'
lowerCAmelCase = getattr(self.backend_tokenizer , UpperCAmelCase__ , UpperCAmelCase__ )
if tokenizer_component_instance:
lowerCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase = tuple(state['sep'] )
if "cls" in state:
lowerCAmelCase = tuple(state['cls'] )
lowerCAmelCase = False
if state.get('add_prefix_space' , UpperCAmelCase__ ) != add_prefix_space:
lowerCAmelCase = add_prefix_space
lowerCAmelCase = True
if state.get('trim_offsets' , UpperCAmelCase__ ) != trim_offsets:
lowerCAmelCase = trim_offsets
lowerCAmelCase = True
if changes_to_apply:
lowerCAmelCase = getattr(UpperCAmelCase__ , state.pop('type' ) )
lowerCAmelCase = component_class(**UpperCAmelCase__ )
setattr(self.backend_tokenizer , UpperCAmelCase__ , UpperCAmelCase__ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : Optional[Any] ) -> Tuple:
lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else value
lowerCAmelCase = value
def __UpperCAmelCase ( self : Optional[Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : List[str] ) -> BatchEncoding:
lowerCAmelCase = kwargs.get('is_split_into_words' , UpperCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[str] , *UpperCAmelCase__ : str , **UpperCAmelCase__ : List[str] ) -> BatchEncoding:
lowerCAmelCase = kwargs.get('is_split_into_words' , UpperCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
lowerCAmelCase = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> Any:
return token_ids_a + [self.eos_token_id]
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : "Conversation" ) -> List[int]:
lowerCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(UpperCAmelCase__ )
lowerCAmelCase = ' '.join(UpperCAmelCase__ )
lowerCAmelCase = self.encode(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > self.model_max_length:
lowerCAmelCase = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
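# --- Illustrative sketch (not part of the original snippet) ---
# The single-sequence template built above always appends </s>; with the real
# tokenizer (weights are downloaded on first use) that is observable directly:
from transformers import BlenderbotTokenizerFast

tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
ids = tok(" Hello, how are you?").input_ids
assert ids[-1] == tok.eos_token_id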
| 4 | 0 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def UpperCamelCase__(ode_func: Callable, xa: float, ya: float, x_end: float, step_size: float):
    # Parameter and local names restored from the function body; this is the
    # modified-Euler (Heun) integrator: an Euler predictor followed by a
    # trapezoidal corrector.
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        y_pred = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_pred))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
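# --- Illustrative sketch (not part of the original snippet) ---
# Solving y' = y with y(0) = 1 over [0, 1]; the exact value at x = 1 is e, and
# this second-order method lands within ~1e-4 of it at h = 0.01. The call
# assumes the parameter order of the restored signature above.
import math

euler_modified = UpperCamelCase__  # readable alias for this demo only
ys = euler_modified(lambda x, y: y, 0.0, 1.0, 1.0, 0.01)
assert abs(ys[-1] - math.e) < 1e-3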
| 148 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Initialize remaining_time to the burst times.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process = []
    completed = 0
    total_time = 0
    # While processes remain uncompleted, every process whose arrival time has
    # passed and that still has execution time left is put into ready_process;
    # the shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
F'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(F'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(F'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 4 | 0 |
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
A : Tuple = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
A : Dict = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
A : List[str] = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
return float((preds == labels).mean() )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase="binary" ):
'''simple docstring'''
__lowerCAmelCase = simple_accuracy(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase = float(fa_score(y_true=_UpperCamelCase , y_pred=_UpperCamelCase , average=_UpperCamelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = {}
for id_pred, label in zip(_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
__lowerCAmelCase = id_pred["prediction"]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
__lowerCAmelCase = [(pred, label)]
__lowerCAmelCase , __lowerCAmelCase = [], []
for question, preds_labels in question_map.items():
__lowerCAmelCase , __lowerCAmelCase = zip(*_UpperCamelCase )
__lowerCAmelCase = fa_score(y_true=_UpperCamelCase , y_pred=_UpperCamelCase , average="macro" )
fas.append(_UpperCamelCase )
__lowerCAmelCase = int(sum(pred == label for pred, label in preds_labels ) == len(_UpperCamelCase ) )
ems.append(_UpperCamelCase )
__lowerCAmelCase = float(sum(_UpperCamelCase ) / len(_UpperCamelCase ) )
__lowerCAmelCase = sum(_UpperCamelCase ) / len(_UpperCamelCase )
__lowerCAmelCase = float(fa_score(y_true=_UpperCamelCase , y_pred=[id_pred["prediction"] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _UpperCamelCase ( datasets.Metric ):
'''simple docstring'''
def snake_case ( self ):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
def snake_case ( self ):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
def snake_case ( self , __a , __a ):
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(UpperCAmelCase__ , UpperCAmelCase__ )}
elif self.config_name == "cb":
return acc_and_fa(UpperCAmelCase__ , UpperCAmelCase__ , fa_avg="macro" )
elif self.config_name == "record":
__lowerCAmelCase = [
{
"qas": [
{"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
for ref in references
]
}
]
__lowerCAmelCase = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
return evaluate_record(UpperCAmelCase__ , UpperCAmelCase__ )[0]
elif self.config_name == "multirc":
return evaluate_multirc(UpperCAmelCase__ , UpperCAmelCase__ )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(UpperCAmelCase__ , UpperCAmelCase__ )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
| 57 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : Optional[int] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Tuple ) -> Any:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_euler' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self : List[str] ) -> Dict:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_euler' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_dpmpp_2m' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=1_5 , output_type='np' , use_karras_sigmas=UpperCAmelCase__ , )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array(
[0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
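# --- Illustrative sketch (not part of the original snippet) ---
# The bare inference loop the tests above exercise, outside the test harness.
# Needs a CUDA GPU plus the model weights; prompt and step count are demo picks.
import torch
from diffusers import StableDiffusionKDiffusionPipeline

pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
pipe = pipe.to("cuda")
pipe.set_scheduler("sample_euler")
generator = torch.manual_seed(0)
image = pipe(
    "A painting of a squirrel eating a burger",
    generator=generator,
    num_inference_steps=20,
    output_type="np",
).images[0]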
| 4 | 0 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example: dict) -> dict:
    # per-example pretokenization: store the ids plus a chars-per-token ratio
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F"Dataset tokenized in {time.time()-t_start:.2f}s")
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"Data pushed to the hub in {time.time()-t_start:.2f}s")
| 340 |
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(msg + "." + str(k), val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
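
# Worked example (illustrative only): with num_heads=2, hidden_size=4 and
# num_splits=3 (query/key/value), a checkpoint-version-1.0 weight of shape
# [num_heads * hidden_size * num_splits, D] = [24, D] is viewed as
# (2, 4, 3, D), permuted to (3, 2, 4, D) -- i.e. (num_splits, num_heads,
# hidden_size, D) -- and flattened back to [24, D], regrouping the rows into
# the "split-major" order that checkpoint versions >= 2.0 use natively.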
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))

        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers wants the matrix tied to the word embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
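
# Example usage (a sketch -- the checkpoint path below is an assumption,
# substitute your own Megatron-LM .pt file or .zip archive):
#
#   python convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure \
#       checkpoints/iter_0500000/mp_rank_00/model_optim_rng.pt
#
# The converted config, tokenizer files and pytorch_model.bin are written next
# to the input checkpoint.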
def perfect_cube(n: int) -> bool:
    """Return True if `n` is a perfect cube."""
    val = n ** (1 / 3)
    return (val * val * val) == n
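

# Note (added): the float cube root above can suffer from rounding error for
# large inputs. A sketch of an exact integer variant -- the name
# `perfect_cube_int` is ours, not from the original file:
def perfect_cube_int(n: int) -> bool:
    root = round(abs(n) ** (1 / 3))
    # check the neighbours of the rounded root to absorb float error
    return any((root + d) ** 3 == abs(n) for d in (-1, 0, 1))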
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
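
# Background note (added): the Sherman-Morrison formula states that for an
# invertible matrix A and column vectors u, v,
#     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
# valid whenever 1 + v^T A^(-1) u != 0. The method above assumes `self`
# already holds A^(-1), which is why it can update the inverse in O(n^2)
# instead of re-inverting from scratch in O(n^3).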
# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
MAX_MODEL_INPUT_SIZES = {
'facebook/s2t-small-librispeech-asr': 1024,
}
MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    """Construct a Speech2Text tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the prefix tokens to the language code of the given target language."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) into a token (str) using the decoder."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by appending eos_token_id."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
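
# Usage sketch (ours, not part of the original file; the checkpoint name is the
# one referenced in the vocabulary maps above):
#
#   tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#   ids = tokenizer("hello world").input_ids  # ends with tokenizer.eos_token_id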
'''simple docstring'''
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
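
# Usage sketch (our example, not part of the original file):
#   ps = PrefixSum([1, 2, 3, 4])  # prefix sums: [1, 3, 6, 10]
#   ps.get_sum(1, 3)              # 2 + 3 + 4 == 9
#   ps.contains_sum(5)            # True: the subarray [2, 3] sums to 5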
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    # queue used to store nodes and their rank
    queue: list = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    #   (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1

        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
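
# Note (added): this greedy rule -- repeatedly take the highest-degree vertex --
# is a heuristic. It is not guaranteed to return a minimum vertex cover, and on
# some graphs it does worse than the simple maximal-matching 2-approximation,
# so treat the result as an upper bound.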
if __name__ == "__main__":
import doctest
doctest.testmod()
__A = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
'''simple docstring'''
def get_demo_graph(index):  # descriptive name; nothing else in this file references it
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
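
# Example (ours): for get_demo_graph(0), the bridges are the edges whose
# removal disconnects the graph -- (2, 3), (3, 4) and (2, 5). The ordering of
# the returned list depends on DFS traversal order.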
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
__A = open # noqa: we just need to have a builtin inside this module to test it properly
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
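
# Example usage (a sketch -- the checkpoint filename below is an assumption):
#
#   python convert_glpn_to_pytorch.py \
#       --checkpoint_path glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti \
#       --model_name glpn-kitti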
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        expected_input_values = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], expected_input_values, atol=1e-4))
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example: dict) -> dict:
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
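

# Note: these are networked integration tests; with pytest they are typically
# selected via the integration marker, e.g. `pytest -m integration` (invocation
# illustrative, not part of the original file).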
| 73 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case =logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
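

# Minimal usage sketch (values illustrative, not from the original file):
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="out",
#       predict_with_generate=True,
#       generation_max_length=128,
#       generation_num_beams=4,
#   )
#   args.to_dict()  # any GenerationConfig value is serialized to a plain dict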
| 4 | 0 |
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=1,
            new_decoder_architecture=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)

    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_cache_conversions(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(standard_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )

    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_past_key_values_format(self):
        # Falcon can have different numbers of KV heads than query heads, so this
        # override checks the cache shapes with the model-specific head counts.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads

            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)

            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )

        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]

        self.assertEqual(output_str, EXPECTED_OUTPUT)

    @slow
    def test_lm_generation_big_models(self):
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
| 252 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
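

# Behavioral sketch of should_ignore (example keys, not from the original file):
#   should_ignore("encoder.model.0.conv", ["encoder.*"])         -> True  (prefix wildcard)
#   should_ignore("quantizer.vq.layers.0.embed", ["vq.*.embed"]) -> True  (infix wildcard)
#   should_ignore("ln_f.bias", ["ln_f.bias"])                    -> True  (exact substring)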
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 4 | 0 |
"""simple docstring"""
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
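

# Note on the update above (shapes illustrative, not from the original file):
# each LoRA pair applies W' = W0 + alpha * (up @ down). For a rank-r adapter on
# an (out, in) linear layer, `up` is (out, r) and `down` is (r, in), so
# torch.mm(up, down) reconstructs the full (out, in) weight delta; the 4-dim
# branch handles 1x1 conv weights by squeezing/unsqueezing the spatial dims.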
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
)
parser.add_argument(
"--lora_prefix_text_encoder",
default="lora_te",
type=str,
help="The prefix of text encoder weight in safetensors",
)
parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
parser.add_argument(
"--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
)
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 148 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 4 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a):
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a):
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self):
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a):
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += self.coefficients[i] * polynomial_a.coefficients[j]
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution):
        result = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self):
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self):
        return self.__str__()

    def derivative(self):
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant=0):
        coefficients = [0.0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a):
        if not isinstance(polynomial_a, Polynomial):
            return False

        if self.degree != polynomial_a.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_a):
        return not self.__eq__(polynomial_a)
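

# A minimal usage sketch (not part of the original module), exercising the
# Polynomial class above; printed forms follow the __str__ convention.
if __name__ == "__main__":
    p = Polynomial(2, [1, 0, 3])  # 3x^2 + 1
    q = Polynomial(1, [0, 2])  # 2x
    print(p + q)  # 3x^2 + 2x + 1
    print(p * q)  # 6x^3 + 2x
    print(p.evaluate(2))  # 3*2**2 + 1 = 13
    print(p.derivative())  # 6x
    print(p.integral(constant=5))  # 1.0x^3 + 1.0x + 5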
| 57 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/blenderbot_small-90M""": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
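

# Behavioral sketch (illustrative ids, not part of the original file):
#   build_inputs_with_special_tokens([5, 6])       -> [bos, 5, 6, eos]
#   build_inputs_with_special_tokens([5, 6], [7])  -> [bos, 5, 6, eos, eos, 7, eos]
#   create_token_type_ids_from_sequences([5, 6])   -> [0, 0, 0, 0]
# Token type ids are all zeros because BlenderbotSmall does not use them.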
| 4 | 0 |
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        "Check if log should be performed"
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
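

# Example usage (sketch): inside an `accelerate`-launched script,
#
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("logged once, on the main process only")
#   logger.info("logged by every rank, in order", main_process_only=False, in_order=True)
#
# `main_process_only` and `in_order` are consumed by MultiProcessAdapter.log above.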
| 340 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
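

# Usage sketch (not part of the original file): the attribute_map above lets
# common names resolve to decoder-specific attributes, e.g.
#
#   config = Speech2Text2Config()
#   config.hidden_size          # -> 256 (alias of config.d_model)
#   config.num_attention_heads  # -> 4 (alias of config.decoder_attention_heads)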
| 4 | 0 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weights mapping in transformers PP only"""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
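

# Quick examples (illustrative, not part of the original file):
#   get_dtype_size(torch.float16) -> 2 (bytes); get_dtype_size(torch.bool) -> 1/8
#   layer_name_mapping("input_layernorm.weight", "layer_04-model_00-model_states.pt")
#     -> "h.1.input_layernorm.weight"  (layer 4 minus the 3 leading non-block layers)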
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 88 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
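

# Worked example (illustrative, not part of the original file): for [3, 1, 2],
# the loop builds stacks [[3, 1], [2]] -- 1 lands on the first stack because
# its top (3) compares greater, while 2 starts a new stack -- and merging the
# reversed stacks [1, 3] and [2] yields [1, 2, 3].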
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted))
| 4 | 0 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(state: CommonSchedulerState , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(state: CommonSchedulerState , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    # forward diffusion: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state: CommonSchedulerState , sample: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    # v-prediction target: v = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
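# A minimal, self-contained sketch (my addition, not from the source): exercising the helpers
# above with a hand-built CommonSchedulerState. The 10-step schedule and tensor shapes are
# arbitrary illustration choices.
if __name__ == "__main__":
    demo_betas = betas_for_alpha_bar(10)
    demo_alphas = 1.0 - demo_betas
    demo_state = CommonSchedulerState(
        alphas=demo_alphas, betas=demo_betas, alphas_cumprod=jnp.cumprod(demo_alphas, axis=0))
    x0 = jnp.ones((2, 3, 4, 4))
    eps = jnp.zeros_like(x0)
    timesteps = jnp.array([0, 9])
    # with zero noise, add_noise_common returns sqrt(alpha_bar_t) * x_0 for each sample
    print(add_noise_common(demo_state, x0, eps, timesteps).shape)  # (2, 3, 4, 4)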
| 201 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__snake_case ="""\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__snake_case ="""\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__snake_case ="""
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu( datasets.Metric ):
    def _info(self ) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute(self , predictions , references , max_order=4 , smooth=False ) -> dict:
        score = compute_bleu(
            reference_corpus=references , translation_corpus=predictions , max_order=max_order , smooth=smooth )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
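# A hedged, self-contained sketch (my addition): the modified unigram precision that feeds the
# BLEU geometric mean, computed with collections.Counter. Counts are clipped to the maximum
# number of times each token appears in any single reference.
# from collections import Counter
# candidate = ["the", "the", "the", "cat"]
# reference = ["the", "cat", "sat"]
# overlap = Counter(candidate) & Counter(reference)   # clipped counts: {"the": 1, "cat": 1}
# print(sum(overlap.values()) / len(candidate))       # 0.5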
| 4 | 0 |
"""simple docstring"""
from __future__ import annotations
def two_pointer(nums: list[int] , target: int ) -> list[int]:
    # classic two-pointer scan; assumes `nums` is sorted in non-decreasing order
    i = 0
    j = len(nums ) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 1_1, 1_5], 9) = }''')
| 177 |
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__snake_case ="""\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
__snake_case ="""\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
__snake_case ="""
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds , labels ):
    return float((preds == labels).mean() )
def acc_and_f1(preds , labels , f1_avg="binary" ):
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds , average=f1_avg ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds , labels ):
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = f'''{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}'''
        pred = id_pred['prediction']
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    f1s , ems = [] , []
    for question, preds_labels in question_map.items():
        question_preds , question_labels = zip(*preds_labels )
        f1 = f1_score(y_true=question_labels , y_pred=question_preds , average='macro' )
        f1s.append(f1 )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    f1_m = float(sum(f1s ) / len(f1s ) )
    em = sum(ems ) / len(ems )
    f1_a = float(f1_score(y_true=labels , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue( datasets.Metric ):
    def _info(self ) -> datasets.MetricInfo:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
    def _get_feature_types(self ) -> dict:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
    def _compute(self , predictions , references ) -> dict:
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "cb":
            return acc_and_f1(predictions , references , f1_avg='macro' )
        elif self.config_name == "record":
            dataset = [
                {
                    'qas': [
                        {'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
            return evaluate_record(dataset , predictions )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions , references )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
| 4 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig ):
    model_type = 'biogpt'
    def __init__( self , vocab_size=42384 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , initializer_range=0.02 , layer_norm_eps=1e-12 , scale_embedding=True , use_cache=True , layerdrop=0.0 , activation_dropout=0.0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) -> None:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
| 217 |
'''simple docstring'''
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
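# How this quine works (explanatory note, my addition): the lambda receives its own source as a
# string; `%%` inside the string literal escapes to a single `%`, and `%r` re-embeds the string
# with quotes via repr(), so `quine % quine` reproduces the original print statement exactly.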
| 4 | 0 |
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    freeze_feature_extractor: Optional[bool] = field(
        default=True , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
    verbose_logging: Optional[bool] = field(
        default=False , metadata={"help": "Whether to log verbose messages or not."} , )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."} )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."} )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995 , metadata={"help": "Decay of gumbel temperature during training."} )
def configure_logger(model_args: ModelArguments , training_args: TrainingArguments ):
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank ):
        logging_level = logging.INFO
    logger.setLevel(logging_level )
@dataclass
class DataTrainingArguments:
    dataset_name: str = field(
        default=None , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_split_name: Optional[str] = field(
        default="train" , metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        } , )
    validation_split_name: Optional[str] = field(
        default="validation" , metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        } , )
    speech_file_column: Optional[str] = field(
        default="file" , metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"} , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    validation_split_percentage: Optional[int] = field(
        default=1 , metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        } , )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={"help": "The number of processes to use for the preprocessing."} , )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} )
@dataclass
class DataCollatorForWavaVecaPretraining:
    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None
    def __call__( self , features: List[Dict[str, Union[List[int], torch.Tensor]]] ):
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch['input_values'].shape[-1] )
        batch_size = batch['input_values'].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch['attention_mask'].sum(-1 ) ).to(
                torch.long )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['input_values'].device )
            # these two operations make sure that all values
            # before the output length indices are attended to
            attention_mask[(torch.arange(attention_mask.shape[0] , device=batch['input_values'].device ), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=attention_mask , min_masks=2 , )
        return batch
class WavaVecaPreTrainer( Trainer ):
    def __init__( self , *args , max_gumbel_temp: int = 1 , min_gumbel_temp: int = 0 , gumbel_temp_decay: float = 1.0 , **kwargs ):
        super().__init__(*args , **kwargs )
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step( self , model: nn.Module , inputs: Dict[str, Union[torch.Tensor, Any]] ):
        model.train()
        inputs = self._prepare_inputs(inputs )
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model , inputs )
        else:
            loss = self.compute_loss(model , inputs )
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs['mask_time_indices']).sum()
            else:
                raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']''' )
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss ).backward()
        elif self.use_apex:
            with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss )
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
        return loss.detach()
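# A quick numeric illustration (my addition): with max_gumbel_temp=2.0, gumbel_temp_decay=0.999995
# and min_gumbel_temp=0.5, the temperature after 100k updates is 2.0 * 0.999995**100000 ~= 1.21,
# decaying smoothly toward the 0.5 floor as training progresses.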
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args , training_args )
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , )
        datasets["train"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split='validation' , cache_dir=model_args.cache_dir , )
        datasets["train"] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , )
    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=True )
    def prepare_dataset(batch ):
        # check that all files have the correct sampling rate
        batch["speech"] , _ = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
        return batch
    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['train'].column_names )
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data : len(data['speech'] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
    def normalize(batch ):
        return feature_extractor(batch['speech'] , sampling_rate=feature_extractor.sampling_rate )
    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['train'].column_names , )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            'PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'
            ' ``config.feat_extract_norm="layer"``' )
    model = WavaVecaForPreTraining(config )
    data_collator = DataCollatorForWavaVecaPretraining(model=model , feature_extractor=feature_extractor )
    trainer = WavaVecaPreTrainer(
        model=model , data_collator=data_collator , args=training_args , train_dataset=vectorized_datasets['train'] , eval_dataset=vectorized_datasets['validation'] , tokenizer=feature_extractor , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
    trainer.train()
main()
| 332 |
'''simple docstring'''
import os
__snake_case ={"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1_000}
def parse_roman_numerals(numerals: str ) -> int:
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int ) -> str:
    numerals = ''
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def a_ ( lowerCamelCase : str = "/p089_roman.txt" ):
lowerCAmelCase = 0
with open(os.path.dirname(lowerCamelCase ) + roman_numerals_filename ) as filea:
lowerCAmelCase = filea.readlines()
for line in lines:
lowerCAmelCase = line.strip()
lowerCAmelCase = parse_roman_numerals(lowerCamelCase )
lowerCAmelCase = generate_roman_numerals(lowerCamelCase )
savings += len(lowerCamelCase ) - len(lowerCamelCase )
return savings
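# Hedged illustration (my addition): "IIIIIIIII" parses to 9 and regenerates as the minimal
# form "IX", a saving of 7 characters for that line of the Project Euler 89 input.
# assert parse_roman_numerals("IIIIIIIII") == 9
# assert generate_roman_numerals(9) == "IX"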
if __name__ == "__main__":
print(F'''{solution() = }''')
| 4 | 0 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig( PretrainedConfig):
    """simple docstring"""
    model_type = 'maskformer'
    attribute_map = {'hidden_size': 'mask_feature_size'}
    backbones_supported = ['resnet', 'swin']
    decoders_supported = ['detr']
    def __init__( self , fpn_feature_size: int = 256 , mask_feature_size: int = 256 , no_object_weight: float = 0.1 , use_auxiliary_loss: bool = False , backbone_config: Optional[Dict] = None , decoder_config: Optional[Dict] = None , init_std: float = 0.02 , init_xavier_std: float = 1.0 , dice_weight: float = 1.0 , cross_entropy_weight: float = 1.0 , mask_weight: float = 20.0 , output_auxiliary_logits: Optional[bool] = None , **kwargs , ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop('model_type' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                F'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
                F'Supported model types: {",".join(self.backbones_supported )}' )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop('model_type' ) if isinstance(decoder_config , dict ) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    F'Transformer Decoder {decoder_type} not supported, please use one of'
                    F' {",".join(self.decoders_supported )}' )
            if isinstance(decoder_config , dict ):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config )
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs )
    @classmethod
    def from_backbone_and_decoder_configs( cls , backbone_config: PretrainedConfig , decoder_config: PretrainedConfig , **kwargs ):
        return cls(
            backbone_config=backbone_config , decoder_config=decoder_config , **kwargs , )
    def to_dict( self ) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__ )
        output['backbone_config'] = self.backbone_config.to_dict()
        output['decoder_config'] = self.decoder_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 260 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """Creates a random float32 tensor as a nested list"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class ASTFeatureExtractionTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=1 , padding_value=0.0 , sampling_rate=16000 , return_attention_mask=True , do_normalize=True , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self ) -> dict:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = ASTFeatureExtractor
    def setUp(self ) -> None:
        self.feat_extract_tester = ASTFeatureExtractionTester(self )
    def test_call(self ) -> None:
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1E-3 ) )
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs , padding=True , return_tensors='np' ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs , padding=True , return_tensors='np' ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feat_extract(speech_inputs , return_tensors='np' ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs , return_tensors='np' ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
    @require_torch
    def test_double_precision_pad(self ) -> None:
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
    def _load_datasamples(self , num_samples ):
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('id' ).select(range(num_samples ) )[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
    @require_torch
    def test_integration(self ) -> None:
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
        # fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech , return_tensors='pt' ).input_values
        self.assertEquals(input_values.shape , (1, 1024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , EXPECTED_INPUT_VALUES , atol=1E-4 ) )
| 4 | 0 |
def depth_first_search(grid: list[list[int]] , row: int , col: int , visit: set ) -> int:
    """Counts simple paths from (row, col) to the bottom-right cell, avoiding cells marked 1."""
    row_length , col_length = len(grid ) , len(grid[0] )
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col) )
    count = 0
    count += depth_first_search(grid , row + 1 , col , visit )
    count += depth_first_search(grid , row - 1 , col , visit )
    count += depth_first_search(grid , row , col + 1 , visit )
    count += depth_first_search(grid , row , col - 1 , visit )
    visit.remove((row, col) )
    return count
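# Hedged usage sketch (my addition): on a 3x3 grid with the centre cell blocked there are
# exactly two simple paths from (0, 0) to (2, 2) -- along the top/right edges or along the
# left/bottom edges.
# example_grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
# print(depth_first_search(example_grid, 0, 0, set()))  # -> 2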
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase ):
    def test_accelerated_optimizer_pickling(self ) -> None:
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
        AcceleratorState._reset_state()
| 4 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin , PretrainedConfig ):
    """simple docstring"""
    model_type = 'resnet'
    layer_types = ['basic', 'bottleneck']
    def __init__( self , num_channels=3 , embedding_size=64 , hidden_sizes=[256, 512, 1024, 2048] , depths=[3, 4, 6, 3] , layer_type="bottleneck" , hidden_act="relu" , downsample_in_first_stage=False , out_features=None , out_indices=None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [F'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class ResNetOnnxConfig(OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        '''simple docstring'''
        return 1e-3
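# Minimal usage sketch (my addition, assuming the class names restored above): build a config
# for a ResNet-50-style model and read back the ONNX validation tolerance.
# config = ResNetConfig(layer_type="bottleneck", depths=[3, 4, 6, 3])
# onnx_config = ResNetOnnxConfig(config)
# print(list(onnx_config.inputs))         # ['pixel_values']
# print(onnx_config.atol_for_validation)  # 0.001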
| 252 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) -> None:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )
            changes_to_apply = False
            if state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' , trim_offsets ) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('type' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self ) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.' )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token(self , value ) -> None:
        # the mask token behaves like a normal word, i.e. it includes the space before it
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus(self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus(self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def build_inputs_with_special_tokens(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self , conversation: "Conversation" ) -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(' ' + text )
            else:
                # Generated responses should contain them already.
                inputs.append(text )
        full_string = ' '.join(inputs )
        input_ids = self.encode(full_string )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
        return input_ids
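# Hedged usage sketch (my addition): for Blenderbot the special-token template simply appends
# </s>, so a single encoded utterance should end with the EOS id.
# tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
# ids = tokenizer(" Hello world").input_ids
# assert ids[-1] == tokenizer.eos_token_id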
| 4 | 0 |
"""simple docstring"""
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number: int ) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number ) )
def solution() -> int:
    return sum(
        number
        for number in range(1000 , 1000000 )
        if number == digits_fifth_powers_sum(number ) )
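# Worked check (my addition): 4150 is one of the sought numbers, since
# 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.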
if __name__ == "__main__":
print(solution())
| 148 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int] , burst_time: list[int] , no_of_processes: int ) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time with each process's burst_time.
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    ready_process = []
    completed = 0
    total_time = 0
    # While processes are not completed,
    # a process whose arrival time has passed
    # and that has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes ):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i )
        if len(ready_process ) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int] , no_of_processes: int , waiting_time: list[int] ) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
F'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(F'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(F'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 4 | 0 |
"""simple docstring"""
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_matches_expected_mst():
    '''simple docstring'''
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes , edges )
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(result ) == sorted(expected )
| 57 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase ):
    def tearDown(self ) -> None:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self ) -> None:
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_euler' )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_2(self ) -> None:
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_euler' )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
    def test_stable_diffusion_karras_sigmas(self ) -> None:
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_dpmpp_2m' )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , num_inference_steps=15 , output_type='np' , use_karras_sigmas=True , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 4 | 0 |
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption( parser ):
    """simple docstring"""
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser )


def pytest_terminal_summary( terminalreporter ):
    """simple docstring"""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
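# With this conftest in place, the shared option is exposed on the pytest
# command line; a hypothetical invocation (report id is illustrative):
#
#   python -m pytest tests/ --make-reports=my_gpu_run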
| 340 |
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print( name , val , spaces=0 ):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = '.' * max(0 , spaces - 2 ) + '# {:' + str(50 - spaces ) + 's}'
        msg = fmt.format(name )

    # Print and recurse (if needed).
    if isinstance(val , dict ):
        if msg is not None:
            print(msg )
        for k in val.keys():
            recursive_print(k , val[k] , spaces + 2 )
    elif isinstance(val , torch.Tensor ):
        print(msg , ':' , val.size() )
    else:
        print(msg , ':' , val )
def fix_query_key_value_ordering( param , checkpoint_version , num_splits , num_heads , hidden_size ):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape )
        param = param.transpose(0 , 2 )
        param = param.transpose(1 , 2 ).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape )
        param = param.transpose(0 , 1 ).contiguous()
    param = param.view(*input_shape )
    return param
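# Minimal sketch of what fix_query_key_value_ordering does, on a dummy tensor
# (shapes are illustrative: 2 heads, head size 4, 3 splits for Q/K/V):
#
#   param = torch.arange(2 * 3 * 4 * 5).reshape(2 * 3 * 4, 5).float()
#   out = fix_query_key_value_ordering(param, checkpoint_version=2.0,
#                                      num_splits=3, num_heads=2, hidden_size=4)
#   assert out.shape == param.shape  # only the layout is permuted, not the shape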
def convert_megatron_checkpoint( args , input_state_dict , config ):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get('args' , None )
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))

        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict['checkpoint_version']
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict['model']
    # The language model.
    lm = model['language_model']
    # The embeddings.
    embeddings = lm['embedding']

    # The word embeddings.
    word_embeddings = embeddings['word_embeddings']['weight']
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict['transformer.wte.weight'] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings['position_embeddings']['weight']
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0 )
    if n_positions != config.n_positions:
        raise ValueError(
            f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
    # Store the position embeddings.
    output_state_dict['transformer.wpe.weight'] = pos_embeddings

    # The transformer.
    transformer = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder']

    # The regex to extract layer names.
    layer_re = re.compile(R'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' )

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        'attention.dense': '.attn.c_proj.',
        'self_attention.dense': '.attn.c_proj.',
        'mlp.dense_h_to_4h': '.mlp.c_fc.',
        'mlp.dense_4h_to_h': '.mlp.c_proj.',
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key )

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1 ) )
        # The name of the operation.
        op_name = m.group(2 )
        # Is it a weight or a bias?
        weight_or_bias = m.group(3 )

        # The name of the layer.
        layer_name = f'''transformer.h.{layer_idx}'''

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith('layernorm' ):
            ln_name = 'ln_1' if op_name.startswith('input' ) else 'ln_2'
            output_state_dict[layer_name + '.' + ln_name + '.' + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.float16 ) ).view(
                1 , 1 , n_positions , n_positions )
            output_state_dict[layer_name + '.attn.bias'] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4 , dtype=torch.float16 )
            output_state_dict[layer_name + '.attn.masked_bias'] = masked_bias

            out_val = fix_query_key_value_ordering(val , checkpoint_version , 3 , heads , hidden_size_per_head )
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0 , 1 ).contiguous()
            # Store.
            output_state_dict[layer_name + '.attn.c_attn.weight'] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val , checkpoint_version , 3 , heads , hidden_size_per_head )
            # Store. No change of shape.
            output_state_dict[layer_name + '.attn.c_attn.bias'] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + 'weight'] = val.transpose(0 , 1 )

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + 'bias'] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict['transformer.ln_f.weight'] = transformer['final_layernorm.weight']
    output_state_dict['transformer.ln_f.bias'] = transformer['final_layernorm.bias']

    # For the LM head, transformers wants the matrix tied to the word embeddings.
    output_state_dict['lm_head.weight'] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument('--print-checkpoint-structure' , action='store_true' )
    parser.add_argument(
        'path_to_checkpoint' , type=str , help='Path to the checkpoint file (.zip archive or direct .pt file)' , )
    parser.add_argument(
        '--config_file' , default='' , type=str , help='An optional config json file describing the pre-trained model.' , )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint )

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
    if args.path_to_checkpoint.endswith('.zip' ):
        with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint:
            with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict , map_location='cpu' )
    else:
        input_state_dict = torch.load(args.path_to_checkpoint , map_location='cpu' )

    ds_args = input_state_dict.get('args' , None )

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = 'gelu_fast'
            elif ds_args.openai_gelu:
                activation_function = 'gelu_new'
            else:
                activation_function = 'gelu'
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = 'gelu_new'

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=activation_function , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='cls_index' , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , scale_attn_weights=True , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , )
    else:
        config = GPT2Config.from_json_file(args.config_file )

    config.architectures = ['GPT2LMHeadModel']

    # Convert.
    print('Converting' )
    output_state_dict = convert_megatron_checkpoint(args , input_state_dict , config )

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None , output_state_dict )

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = 'gpt2'
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' )
    else:
        tokenizer_model_name = 'gpt2'

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name )
    tokenizer_class = type(tokenizer ).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print('Saving config' )
    config.save_pretrained(basename )

    # Save tokenizer based on args
    print(f'''Adding {tokenizer_class} tokenizer files''' )
    tokenizer.save_pretrained(basename )

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename , 'pytorch_model.bin' )
    print(f'''Saving checkpoint to "{output_checkpoint_file}"''' )
    torch.save(output_state_dict , output_checkpoint_file )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
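# Example invocation (paths are illustrative; the PYTHONPATH requirement is
# explained in the header comment above):
#
#   PYTHONPATH=/tmp/Megatron-LM python convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure /path/to/checkpoint.zip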
| 4 | 0 |
from math import factorial
def solution( A_ = 100 ):
    '''simple docstring'''
    return sum(int(x ) for x in str(factorial(A_ ) ) )
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
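# Quick check of the digit-sum logic (added for illustration):
# factorial(10) == 3628800, and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27, so solution(10) == 27.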
| 88 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class UpperCAmelCase_ :
def __init__( self : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 0 ) -> None:
lowerCAmelCase , lowerCAmelCase = row, column
lowerCAmelCase = [[default_value for c in range(UpperCAmelCase__ )] for r in range(UpperCAmelCase__ )]
def __str__( self : List[str] ) -> str:
lowerCAmelCase = F'''Matrix consist of {self.row} rows and {self.column} columns\n'''
# Make string identifier
lowerCAmelCase = 0
for row_vector in self.array:
for obj in row_vector:
lowerCAmelCase = max(UpperCAmelCase__ , len(str(UpperCAmelCase__ ) ) )
lowerCAmelCase = F'''%{max_element_length}s'''
# Make string and return
def single_line(UpperCAmelCase__ : list[float] ) -> str:
nonlocal string_format_identifier
lowerCAmelCase = '['
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(UpperCAmelCase__ ) for row_vector in self.array )
return s
def __repr__( self : List[str] ) -> str:
return str(self )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : tuple[int, int] ) -> bool:
if not (isinstance(UpperCAmelCase__ , (list, tuple) ) and len(UpperCAmelCase__ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Any , UpperCAmelCase__ : tuple[int, int] ) -> Any:
assert self.validate_indicies(UpperCAmelCase__ )
return self.array[loc[0]][loc[1]]
def __setitem__( self : Dict , UpperCAmelCase__ : tuple[int, int] , UpperCAmelCase__ : float ) -> None:
assert self.validate_indicies(UpperCAmelCase__ )
lowerCAmelCase = value
def __add__( self : Any , UpperCAmelCase__ : Matrix ) -> Matrix:
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert self.row == another.row and self.column == another.column
# Add
lowerCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = self[r, c] + another[r, c]
return result
def __neg__( self : int ) -> Matrix:
lowerCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = -self[r, c]
return result
def __sub__( self : str , UpperCAmelCase__ : Matrix ) -> Matrix:
return self + (-another)
def __mul__( self : str , UpperCAmelCase__ : int | float | Matrix ) -> Matrix:
if isinstance(UpperCAmelCase__ , (int, float) ): # Scalar multiplication
lowerCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = self[r, c] * another
return result
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): # Matrix multiplication
assert self.column == another.row
lowerCAmelCase = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
lowerCAmelCase = F'''Unsupported type given for another ({type(UpperCAmelCase__ )})'''
raise TypeError(UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[Any] ) -> Matrix:
lowerCAmelCase = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = self[r, c]
return result
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : Matrix , UpperCAmelCase__ : Matrix ) -> Any:
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
lowerCAmelCase = v.transpose()
lowerCAmelCase = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
            return None  # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
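    # Note (added for clarity): this method applies the Sherman-Morrison identity
    #   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
    # where `self` plays the role of A^(-1): it must already hold the inverse of A,
    # and u, v are column vectors. When 1 + v^T A^(-1) u == 0, no inverse exists.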
# Testing
if __name__ == "__main__":
def a_ ( ):
# a^(-1)
lowerCAmelCase = Matrix(3 , 3 , 0 )
for i in range(3 ):
lowerCAmelCase = 1
print(f'''a^(-1) is {ainv}''' )
# u, v
lowerCAmelCase = Matrix(3 , 1 , 0 )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1, 2, -3
lowerCAmelCase = Matrix(3 , 1 , 0 )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 4, -2, 5
print(f'''u is {u}''' )
print(f'''v is {v}''' )
print(f'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(lowerCamelCase , lowerCamelCase )}''' )
def a_ ( ):
import doctest
doctest.testmod()
testa()
| 4 | 0 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
UpperCAmelCase_ = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n'
UpperCAmelCase_ = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
UpperCAmelCase_ = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ), id='''references''' ),
} ), codebase_urls=['''https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'''], reference_urls=[
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
], )
    def _compute( self, predictions, references, max_order=4, smooth=False ):
        """simple docstring"""
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 201 |
'''simple docstring'''
class PrefixSum:
    def __init__( self , array : list[int] ) -> None:
        len_array = len(array )
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum( self , start : int , end : int ) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum( self , target_sum : int ) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
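# Usage sketch (illustrative values):
#   ps = PrefixSum([1, 2, 3])
#   ps.get_sum(0, 2)     # -> 6
#   ps.contains_sum(5)   # -> True, from the subarray [2, 3]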
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config( PretrainedConfig ):
    """simple docstring"""

    model_type = '''speech_to_text_2'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''decoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__( self , vocab_size=10000 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=4 , decoder_layerdrop=0.0 , use_cache=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_target_positions=1024 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 177 |
'''simple docstring'''
def get_demo_graph( index : int ):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges( graph : dict[int, list[int]] ) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph )  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at , parent , bridges , id_ ):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to , at , bridges , id_ )
                low[at] = min(low[at] , low[to] )
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at) )
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at] , low[to] )

    bridges: list[tuple[int, int]] = []
    for i in range(n ):
        if not visited[i]:
            dfs(i , -1 , bridges , id_ )
    return bridges
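# Example (illustrative): for the graph {0: [1], 1: [0, 2, 3], 2: [1, 3], 3: [1, 2]},
# compute_bridges returns [(0, 1)] -- the only edge whose removal disconnects the
# graph, since vertices 1, 2, 3 form a cycle.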
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 0 |
"""simple docstring"""
class SubArray:
    def __init__( self , arr )-> None:
        '''simple docstring'''
        # we need a list, not a string, so split the comma-separated input
        self.array = arr.split(",")

    def solve_sub_array( self )-> int:
        '''simple docstring'''
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1 , len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1] , int(self.array[i]))
            rear[i] = max(sum_value[i] , rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
| 217 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys( state_dict ):
lowerCAmelCase = OrderedDict()
for key, value in state_dict.items():
if key.startswith('module.encoder' ):
lowerCAmelCase = key.replace('module.encoder' , 'glpn.encoder' )
if key.startswith('module.decoder' ):
lowerCAmelCase = key.replace('module.decoder' , 'decoder.stages' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase = key[key.find('patch_embed' ) + len('patch_embed' )]
lowerCAmelCase = key.replace(f'''patch_embed{idx}''' , f'''patch_embeddings.{int(lowerCamelCase )-1}''' )
if "norm" in key:
lowerCAmelCase = key.replace('norm' , 'layer_norm' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
lowerCAmelCase = key.replace(f'''layer_norm{idx}''' , f'''layer_norm.{int(lowerCamelCase )-1}''' )
if "layer_norm1" in key:
lowerCAmelCase = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
lowerCAmelCase = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase = key[key.find('block' ) + len('block' )]
lowerCAmelCase = key.replace(f'''block{idx}''' , f'''block.{int(lowerCamelCase )-1}''' )
if "attn.q" in key:
lowerCAmelCase = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
lowerCAmelCase = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
lowerCAmelCase = key.replace('attn' , 'attention.self' )
if "fc1" in key:
lowerCAmelCase = key.replace('fc1' , 'dense1' )
if "fc2" in key:
lowerCAmelCase = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
lowerCAmelCase = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
lowerCAmelCase = key.replace('linear_fuse.conv' , 'linear_fuse' )
lowerCAmelCase = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase = key[key.find('linear_c' ) + len('linear_c' )]
lowerCAmelCase = key.replace(f'''linear_c{idx}''' , f'''linear_c.{int(lowerCamelCase )-1}''' )
if "bot_conv" in key:
lowerCAmelCase = key.replace('bot_conv' , '0.convolution' )
if "skip_conv1" in key:
lowerCAmelCase = key.replace('skip_conv1' , '1.convolution' )
if "skip_conv2" in key:
lowerCAmelCase = key.replace('skip_conv2' , '2.convolution' )
if "fusion1" in key:
lowerCAmelCase = key.replace('fusion1' , '1.fusion' )
if "fusion2" in key:
lowerCAmelCase = key.replace('fusion2' , '2.fusion' )
if "fusion3" in key:
lowerCAmelCase = key.replace('fusion3' , '3.fusion' )
if "fusion" in key and "conv" in key:
lowerCAmelCase = key.replace('conv' , 'convolutional_layer' )
if key.startswith('module.last_layer_depth' ):
lowerCAmelCase = key.replace('module.last_layer_depth' , 'head.head' )
lowerCAmelCase = value
return new_state_dict
def read_in_k_v( state_dict , config ):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
lowerCAmelCase = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return image
@torch.no_grad()
def convert_glpn_checkpoint( checkpoint_path , pytorch_dump_folder_path , push_to_hub=False , model_name=None ):
lowerCAmelCase = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
lowerCAmelCase = GLPNImageProcessor()
# prepare image
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=lowerCamelCase , return_tensors='pt' ).pixel_values
logger.info('Converting model...' )
# load original state dict
lowerCAmelCase = torch.load(lowerCamelCase , map_location=torch.device('cpu' ) )
# rename keys
lowerCAmelCase = rename_keys(lowerCamelCase )
# key and value matrices need special treatment
read_in_k_v(lowerCamelCase , lowerCamelCase )
# create HuggingFace model and load state dict
lowerCAmelCase = GLPNForDepthEstimation(lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
# forward pass
lowerCAmelCase = model(lowerCamelCase )
lowerCAmelCase = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
lowerCAmelCase = torch.tensor(
[[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
elif "kitti" in model_name:
lowerCAmelCase = torch.tensor(
[[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
else:
raise ValueError(f'''Unknown model name: {model_name}''' )
lowerCAmelCase = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , lowerCamelCase , atol=1e-4 )
print('Looks ok!' )
# finally, push to hub if required
if push_to_hub:
logger.info('Pushing model and image processor to the hub...' )
model.push_to_hub(
repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCamelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
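# Example invocation (script file name and paths are illustrative):
#
#   python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti --push_to_hub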
| 4 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
_lowercase : Tuple = logging.get_logger(__name__)
class ImageGPTFeatureExtractor( ImageGPTImageProcessor ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            '''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use ImageGPTImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 332 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
    @slow
    def test_xlm_roberta_base( self ):
        model = XLMRobertaModel.from_pretrained('xlm-roberta-base' )
        input_ids = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 1_2, 7_6_8) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )['last_hidden_state'].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )

    @slow
    def test_xlm_roberta_large( self ):
        model = XLMRobertaModel.from_pretrained('xlm-roberta-large' )
        input_ids = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 1_2, 1_0_2_4) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )['last_hidden_state'].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
| 4 | 0 |
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack( list ):
    """simple docstring"""

    def __lt__( self , other ):
        return self[-1] < other[-1]

    def __eq__( self , other ):
        return self[-1] == other[-1]


def patience_sort( collection : list ) -> list:
    '''simple docstring'''
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element] )
        i = bisect_left(stacks , new_stack )
        if i != len(stacks ):
            stacks[i].append(element )
        else:
            stacks.append(new_stack )

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack ) for stack in stacks) )
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted))
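# Example (illustrative): patience_sort([4, 1, 3, 2]) -> [1, 2, 3, 4].
# Each element either lands on the leftmost pile whose top is >= element
# (found via bisect_left) or starts a new pile; the piles are heap-merged at the end.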
| 260 |
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize( example ):
    output = {}
    output['input_ids'] = tokenizer(example['content'] , truncation=False )['input_ids']
    output['ratio_char_token'] = len(example['content'] ) / len(output['input_ids'] )
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="""train""")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 4 | 0 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(
    tokenizer_name , data_dir , max_source_length=1_0_2_4 , max_target_length=1_0_2_4 , consider_target=False , **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path='train' , **kwargs )
    pad = tok.pad_token_id

    def get_lens(ds ):
        dl = tqdm(
            DataLoader(ds , batch_size=5_1_2 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            src_lens = batch['input_ids'].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch['labels'].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens

    train_lens = get_lens(train_ds )
    val_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path='val' , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
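# Example invocation via fire (tokenizer and dataset path are illustrative):
#
#   python save_len_file.py --tokenizer_name t5-small --data_dir ./wmt_en_ro \
#       --max_source_length 128 --max_target_length 128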
| 73 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case =logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments( TrainingArguments ):
    sortish_sampler : bool = field(default=False , metadata={'help': 'Whether to use SortishSampler or not.'} )
    predict_with_generate : bool = field(
        default=False , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
    generation_max_length : Optional[int] = field(
        default=None , metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        } , )
    generation_num_beams : Optional[int] = field(
        default=None , metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        } , )
    generation_config : Optional[Union[str, Path, GenerationConfig]] = field(
        default=None , metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        } , )

    def to_dict( self ):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig ):
                d[k] = v.to_dict()
        return d
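# Usage sketch (hedged: mirrors how these training arguments are typically built):
#
#   args = Seq2SeqTrainingArguments(output_dir='./out', predict_with_generate=True,
#                                   generation_max_length=128)
#   args.to_dict()  # nested GenerationConfig values become plain dicts, so the
#                   # result stays JSON-serializable for logging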
| 4 | 0 |
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "

DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt( prompt_or_repo_id , agent_name , mode="run" ):
    '''simple docstring'''
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("""\\s""" , prompt_or_repo_id ) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id , PROMPT_FILES[mode] , repo_type="""dataset""" , user_agent={"""agent""": agent_name} )
    with open(prompt_file , """r""" , encoding="""utf-8""" ) as f:
        return f.read()
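# Usage sketch (agent name is illustrative):
#
#   template = download_prompt(None, agent_name='MyAgent', mode='chat')
#
# With None, the default prompt repo is used; a string containing whitespace is
# treated as the prompt itself and returned unchanged, while a whitespace-free
# string is resolved as a dataset repo id.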
| 252 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger =logging.get_logger("""transformers.models.encodec""")

MAPPING_QUANTIZER ={
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER ={
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K ={
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER ={
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K ={
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K ={
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K ={
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS =[]
IGNORE_KEYS =[]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
for attribute in key.split('.' ):
lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
if weight_type is not None:
lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase ).shape
else:
lowerCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowerCAmelCase = value
elif weight_type == "weight_g":
lowerCAmelCase = value
elif weight_type == "weight_v":
lowerCAmelCase = value
elif weight_type == "bias":
lowerCAmelCase = value
elif weight_type == "running_mean":
lowerCAmelCase = value
elif weight_type == "running_var":
lowerCAmelCase = value
elif weight_type == "num_batches_tracked":
lowerCAmelCase = value
elif weight_type == "weight_ih_l0":
lowerCAmelCase = value
elif weight_type == "weight_hh_l0":
lowerCAmelCase = value
elif weight_type == "bias_ih_l0":
lowerCAmelCase = value
elif weight_type == "bias_hh_l0":
lowerCAmelCase = value
elif weight_type == "weight_ih_l1":
lowerCAmelCase = value
elif weight_type == "weight_hh_l1":
lowerCAmelCase = value
elif weight_type == "bias_ih_l1":
lowerCAmelCase = value
elif weight_type == "bias_hh_l1":
lowerCAmelCase = value
else:
lowerCAmelCase = value
logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def should_ignore( name , ignore_keys ):
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
lowerCAmelCase , lowerCAmelCase = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def recursively_load_weights( orig_dict , hf_model , model_name ):
lowerCAmelCase = []
if model_name == "encodec_24khz" or "encodec_32khz":
lowerCAmelCase = MAPPING_24K
elif model_name == "encodec_48khz":
lowerCAmelCase = MAPPING_48K
else:
raise ValueError(f'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(lowerCamelCase , lowerCamelCase ):
logger.info(f'''{name} was ignored''' )
continue
lowerCAmelCase = False
for key, mapped_key in MAPPING.items():
if "*" in key:
lowerCAmelCase , lowerCAmelCase = key.split('.*.' )
if prefix in name and suffix in name:
lowerCAmelCase = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
lowerCAmelCase = True
if "*" in mapped_key:
lowerCAmelCase = name.split(lowerCamelCase )[0].split('.' )[-2]
lowerCAmelCase = mapped_key.replace('*' , lowerCamelCase )
if "weight_g" in name:
lowerCAmelCase = 'weight_g'
elif "weight_v" in name:
lowerCAmelCase = 'weight_v'
elif "weight_ih_l0" in name:
lowerCAmelCase = 'weight_ih_l0'
elif "weight_hh_l0" in name:
lowerCAmelCase = 'weight_hh_l0'
elif "bias_ih_l0" in name:
lowerCAmelCase = 'bias_ih_l0'
elif "bias_hh_l0" in name:
lowerCAmelCase = 'bias_hh_l0'
elif "weight_ih_l1" in name:
lowerCAmelCase = 'weight_ih_l1'
elif "weight_hh_l1" in name:
lowerCAmelCase = 'weight_hh_l1'
elif "bias_ih_l1" in name:
lowerCAmelCase = 'bias_ih_l1'
elif "bias_hh_l1" in name:
lowerCAmelCase = 'bias_hh_l1'
elif "bias" in name:
lowerCAmelCase = 'bias'
elif "weight" in name:
lowerCAmelCase = 'weight'
elif "running_mean" in name:
lowerCAmelCase = 'running_mean'
elif "running_var" in name:
lowerCAmelCase = 'running_var'
elif "num_batches_tracked" in name:
lowerCAmelCase = 'num_batches_tracked'
else:
lowerCAmelCase = None
set_recursively(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
continue
if not is_used:
unused_weights.append(lowerCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def convert_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
if config_path is not None:
lowerCAmelCase = EncodecConfig.from_pretrained(lowerCamelCase )
else:
lowerCAmelCase = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
lowerCAmelCase = [8, 5, 4, 4]
lowerCAmelCase = [2.2]
lowerCAmelCase = 64
lowerCAmelCase = 32000
lowerCAmelCase = 2048
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
elif model_name == "encodec_48khz":
lowerCAmelCase = [8, 5, 4, 2]
lowerCAmelCase = [3.0, 6.0, 12.0, 24.0]
lowerCAmelCase = 48000
lowerCAmelCase = 2
lowerCAmelCase = False
lowerCAmelCase = 'time_group_norm'
lowerCAmelCase = True
lowerCAmelCase = 1.0
lowerCAmelCase = 0.01
else:
raise ValueError(f'''Unknown model name: {model_name}''' )
lowerCAmelCase = EncodecModel(lowerCamelCase )
lowerCAmelCase = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(lowerCamelCase )
lowerCAmelCase = torch.load(lowerCamelCase )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
lowerCAmelCase = original_checkpoint['best_state']
recursively_load_weights(lowerCamelCase , lowerCamelCase , lowerCamelCase )
model.save_pretrained(lowerCamelCase )
if repo_id:
print('Pushing to the hub...' )
feature_extractor.push_to_hub(lowerCamelCase )
model.push_to_hub(lowerCamelCase )
if __name__ == "__main__":
    parser =argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args =parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
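# Example invocation (script file name and paths are illustrative; the checkpoint
# URLs are listed in the header comment of this file):
#
#   python convert_encodec_checkpoint_to_pytorch.py --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th --pytorch_dump_folder_path ./encodec_24khz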
| 4 | 0 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi( precision : int ) -> str:
    if not isinstance(precision , int ):
        raise TypeError("Undefined for non-integers" )
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers" )
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 42_6880 * Decimal(1_0005 ).sqrt()
    exponential_term = 1
    linear_term = 1359_1409
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 5_4514_0134
        exponential_term *= -26_2537_4126_4076_8000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
__A = 50
print(f'The first {n} digits of pi is: {pi(n)}')
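# Minimal sanity check (a sketch, assuming the fixed `pi` above): the series
# gains about 14 digits per term, hence the ceil(precision / 14) iteration
# count -- pi(50) runs ceil(50 / 14) = 4 iterations. A short prefix can be
# checked against math.pi:
#
#   import math
#   assert pi(15).startswith(str(math.pi)[:12])  # '3.1415926535'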
| 148 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__snake_case =logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
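# Migration sketch for callers of the deprecated class (the model id
# "facebook/flava-full" is used purely for illustration):
#
#   from transformers import FlavaImageProcessor
#   image_processor = FlavaImageProcessor.from_pretrained("facebook/flava-full")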
| 4 | 0 |
"""simple docstring"""
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
'''simple docstring'''
def snake_case ( self ):
__lowerCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCAmelCase__ , "embed_dim" ) )
self.parent.assertTrue(hasattr(UpperCAmelCase__ , "num_heads" ) )
class CvtModelTester:
'''simple docstring'''
def __init__( self , __a , __a=13 , __a=64 , __a=3 , __a=[16, 48, 96] , __a=[1, 3, 6] , __a=[1, 2, 10] , __a=[7, 3, 3] , __a=[4, 2, 2] , __a=[2, 1, 1] , __a=[2, 2, 2] , __a=[False, False, True] , __a=[0.0, 0.0, 0.0] , __a=0.0_2 , __a=1e-1_2 , __a=True , __a=True , __a=2 , ):
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_sizes
__lowerCAmelCase = patch_stride
__lowerCAmelCase = patch_padding
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_channels
__lowerCAmelCase = embed_dim
__lowerCAmelCase = num_heads
__lowerCAmelCase = stride_kv
__lowerCAmelCase = depth
__lowerCAmelCase = cls_token
__lowerCAmelCase = attention_drop_rate
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
def snake_case ( self ):
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def snake_case ( self ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def snake_case ( self , __a , __a , __a ):
__lowerCAmelCase = CvtModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__lowerCAmelCase = model(UpperCAmelCase__ )
__lowerCAmelCase = (self.image_size, self.image_size)
__lowerCAmelCase , __lowerCAmelCase = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
__lowerCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__lowerCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def snake_case ( self , __a , __a , __a ):
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = CvtForImageClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__lowerCAmelCase = model(UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self ):
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
__UpperCAmelCase : Any =(CvtModel, CvtForImageClassification) if is_torch_available() else ()
__UpperCAmelCase : str =(
{'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase : Optional[Any] =False
__UpperCAmelCase : Dict =False
__UpperCAmelCase : List[str] =False
__UpperCAmelCase : Union[str, Any] =False
__UpperCAmelCase : Tuple =False
def snake_case ( self ):
__lowerCAmelCase = CvtModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=37 )
def snake_case ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self ):
return
@unittest.skip(reason="Cvt does not output attentions" )
def snake_case ( self ):
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
def snake_case ( self ):
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
def snake_case ( self ):
pass
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(UpperCAmelCase__ )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def snake_case ( self ):
def check_hidden_states_output(__a , __a , __a ):
__lowerCAmelCase = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
__lowerCAmelCase = outputs.hidden_states
__lowerCAmelCase = len(self.model_tester.depth )
self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def snake_case ( self ):
pass
@slow
def snake_case ( self ):
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = CvtModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def prepare_img():
'''simple docstring'''
__lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
def snake_case ( self ):
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def snake_case ( self ):
__lowerCAmelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(UpperCAmelCase__ )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=UpperCAmelCase__ , return_tensors="pt" ).to(UpperCAmelCase__ )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**UpperCAmelCase__ )
# verify the logits
__lowerCAmelCase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
__lowerCAmelCase = torch.tensor([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4 ) )
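# Worked example (a sketch, not part of the test suite) of the convolutional
# embedding size formula asserted in create_and_check_model above: with the
# tester defaults image_size=64, patch_sizes[0]=7, patch_stride[0]=4 and
# patch_padding[0]=2, stage 0 yields
#
#   floor((64 + 2 * 2 - 7) / 4 + 1) = floor(16.25) = 16
#
# i.e. a 16 x 16 feature map with embed_dim[0] = 16 channels.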
| 57 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
__snake_case ={
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
__snake_case ={
"""facebook/blenderbot_small-90M""": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            ),
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
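# Worked example (sketch) of create_token_type_ids_from_sequences above: a
# sequence pair is laid out as [CLS] A [SEP] [SEP] B [SEP] and the mask is all
# zeros, e.g. len(A)=2 and len(B)=3 give [0] * 9 -- BlenderbotSmall makes no
# use of token type ids.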
| 4 | 0 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
a_ = logging.getLogger(__name__)
a_ = '''Hello world! cécé herlolip'''
a_ = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Copy/paste and tweak the pre-trained weights provided by the creators
    of BertAbs for the internal architecture.
    """
    # Instantiate the authors' model with the pre-trained weights
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2_048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(checkpoints, torch.device("cpu"), config)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------
    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # -----------------------------------
    # Make sure the outputs are identical
    # -----------------------------------
    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between model outputs: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between generator outputs: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
a_ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
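# Example invocation (a sketch: script and checkpoint filenames are
# assumptions for illustration). Note that the state dict is written to the
# hardcoded path above regardless of --pytorch_dump_folder_path:
#
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path ./bertabs_cnndm_final.pt \
#       --pytorch_dump_folder_path ./bertabs-converted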
| 340 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=10_000, decoder_layers=6, decoder_ffn_dim=2_048, decoder_attention_heads=4,
        decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2,
        scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1_024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
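# Hedged usage sketch: the attribute_map above aliases the generic config
# names onto the decoder-specific ones, e.g.
#
#   config = Speech2Text2Config(d_model=512, decoder_attention_heads=8)
#   config.hidden_size          # -> 512, resolved through attribute_map
#   config.num_attention_heads  # -> 8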
| 4 | 0 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
'''simple docstring'''
@require_torch
def _lowercase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ = pipeline(
task="""zero-shot-audio-classification""" , model="""hf-internal-testing/tiny-clap-htsat-unfused""" )
__magic_name__ = load_dataset("""ashraq/esc50""" )
__magic_name__ = dataset["""train"""]["""audio"""][-1]["""array"""]
__magic_name__ = audio_classifier(UpperCAmelCase__ , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(UpperCAmelCase__ ) , [{"""score""": 0.501, """label""": """Sound of a dog"""}, {"""score""": 0.499, """label""": """Sound of vaccum cleaner"""}] , )
@unittest.skip("""No models are available in TF""" )
def _lowercase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
pass
@slow
@require_torch
def _lowercase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__magic_name__ = pipeline(
task="""zero-shot-audio-classification""" , model="""laion/clap-htsat-unfused""" , )
# This is an audio recording of a dog
__magic_name__ = load_dataset("""ashraq/esc50""" )
__magic_name__ = dataset["""train"""]["""audio"""][-1]["""array"""]
__magic_name__ = audio_classifier(UpperCAmelCase__ , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(UpperCAmelCase__ ) , [
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
] , )
__magic_name__ = audio_classifier([audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(UpperCAmelCase__ ) , [
[
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
__magic_name__ = audio_classifier(
[audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] , batch_size=5 )
self.assertEqual(
nested_simplify(UpperCAmelCase__ ) , [
[
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
@unittest.skip("""No models are available in TF""" )
def _lowercase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
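# Minimal usage sketch outside the test harness (hedged: `waveform` is a
# hypothetical 1-D numpy array of mono audio; the candidate labels are
# arbitrary strings):
#
#   classifier = pipeline(
#       task="zero-shot-audio-classification", model="laion/clap-htsat-unfused"
#   )
#   classifier(waveform, candidate_labels=["Sound of a dog", "Sound of rain"])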
| 88 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge the stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
__snake_case =input("""Enter numbers separated by a comma:\n""").strip()
__snake_case =[int(item) for item in user_input.split(""",""")]
print(patience_sort(unsorted))
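# Worked trace (sketch): patience_sort([5, 1, 4, 2]) builds the stacks
# [5, 1] and [4, 2] -- bisect_left places each element on the leftmost stack
# whose top is >= the element, otherwise it opens a new stack -- and the
# final heapq merge over the reversed stacks yields [1, 2, 4, 5].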
| 4 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
'''simple docstring'''
def __init__( self, __magic_name__, __magic_name__=7, __magic_name__=3, __magic_name__=18, __magic_name__=30, __magic_name__=400, __magic_name__=True, __magic_name__=None, __magic_name__=True, ) -> int:
"""simple docstring"""
UpperCamelCase__ : Tuple = size if size is not None else {'''height''': 18, '''width''': 18}
UpperCamelCase__ : Tuple = parent
UpperCamelCase__ : Any = batch_size
UpperCamelCase__ : Optional[int] = num_channels
UpperCamelCase__ : str = image_size
UpperCamelCase__ : Union[str, Any] = min_resolution
UpperCamelCase__ : Tuple = max_resolution
UpperCamelCase__ : Optional[int] = do_resize
UpperCamelCase__ : Any = size
UpperCamelCase__ : str = apply_ocr
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
'''simple docstring'''
a : Optional[int] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ : int = LayoutLMvaImageProcessingTester(self )
@property
def UpperCamelCase__ ( self ) -> List[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__, '''do_resize''' ) )
self.assertTrue(hasattr(UpperCAmelCase__, '''size''' ) )
self.assertTrue(hasattr(UpperCAmelCase__, '''apply_ocr''' ) )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''height''': 18, '''width''': 18} )
UpperCamelCase__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict, size=42 )
self.assertEqual(image_processor.size, {'''height''': 42, '''width''': 42} )
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
pass
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__, Image.Image )
# Test not batched input
UpperCamelCase__ : str = image_processing(image_inputs[0], return_tensors='''pt''' )
self.assertEqual(
encoding.pixel_values.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
self.assertIsInstance(encoding.words, UpperCAmelCase__ )
self.assertIsInstance(encoding.boxes, UpperCAmelCase__ )
# Test batched
UpperCamelCase__ : List[str] = image_processing(UpperCAmelCase__, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ : str = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCAmelCase__, numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__, np.ndarray )
# Test not batched input
UpperCamelCase__ : Optional[int] = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
# Test batched
UpperCamelCase__ : Optional[Any] = image_processing(UpperCAmelCase__, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
# Initialize image_processing
UpperCamelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCAmelCase__, torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__, torch.Tensor )
# Test not batched input
UpperCamelCase__ : Optional[int] = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
# Test batched
UpperCamelCase__ : Dict = image_processing(UpperCAmelCase__, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
# with apply_OCR = True
UpperCamelCase__ : Optional[int] = LayoutLMvaImageProcessor()
from datasets import load_dataset
UpperCamelCase__ : Union[str, Any] = load_dataset('''hf-internal-testing/fixtures_docvqa''', split='''test''' )
UpperCamelCase__ : Tuple = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
UpperCamelCase__ : str = image_processing(UpperCAmelCase__, return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ), len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
UpperCamelCase__ : Optional[Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
UpperCamelCase__ : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words, UpperCAmelCase__ )
self.assertListEqual(encoding.boxes, UpperCAmelCase__ )
# with apply_OCR = False
UpperCamelCase__ : str = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase__ )
UpperCamelCase__ : Optional[int] = image_processing(UpperCAmelCase__, return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224) )
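# Hedged usage sketch: with apply_ocr=True (the default) the processor runs
# Tesseract and returns words plus boxes normalized to a 0-1000 grid alongside
# the pixel values:
#
#   image_processor = LayoutLMvaImageProcessor()
#   encoding = image_processor(image, return_tensors="pt")
#   encoding.words, encoding.boxes  # OCR output, as asserted above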
| 201 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__snake_case ="""\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__snake_case ="""\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account.
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__snake_case ="""
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
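# The brevity penalty applied inside compute_bleu follows the standard BLEU
# definition (a sketch of the formula, not this module's internals):
#
#   import math
#   def brevity_penalty(candidate_len: int, reference_len: int) -> float:
#       if candidate_len > reference_len:
#           return 1.0
#       return math.exp(1 - reference_len / candidate_len)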
| 4 | 0 |
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=19 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
lowercase__: Optional[int] = parent
lowercase__: List[str] = batch_size
lowercase__: Dict = seq_length
lowercase__: Union[str, Any] = is_training
lowercase__: Dict = use_input_mask
lowercase__: Dict = use_token_type_ids
lowercase__: Optional[int] = use_labels
lowercase__: Tuple = vocab_size
lowercase__: Dict = hidden_size
lowercase__: Union[str, Any] = num_hidden_layers
lowercase__: Any = num_attention_heads
lowercase__: Tuple = intermediate_size
lowercase__: Tuple = hidden_act
lowercase__: int = hidden_dropout_prob
lowercase__: Tuple = attention_probs_dropout_prob
lowercase__: Optional[Any] = max_position_embeddings
lowercase__: Tuple = type_vocab_size
lowercase__: List[str] = type_sequence_label_size
lowercase__: Optional[Any] = initializer_range
lowercase__: Any = num_labels
lowercase__: Optional[Any] = num_choices
lowercase__: List[Any] = scope
def _snake_case ( self ):
lowercase__: Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__: Dict = None
if self.use_input_mask:
lowercase__: List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__: Any = None
lowercase__: Tuple = None
lowercase__: List[Any] = None
if self.use_labels:
lowercase__: Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__: Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowercase__: int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self ):
lowercase__: Optional[Any] = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=UpperCAmelCase__ , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , )
return config
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = EsmForProteinFolding(config=UpperCAmelCase__ ).float()
model.to(UpperCAmelCase__ )
model.eval()
lowercase__: Any = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
lowercase__: Optional[Any] = model(UpperCAmelCase__ )
lowercase__: Tuple = model(UpperCAmelCase__ )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def _snake_case ( self ):
lowercase__: Dict = self.prepare_config_and_inputs()
(
(
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
),
): List[str] = config_and_inputs
lowercase__: List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
_UpperCAmelCase :str = False
_UpperCAmelCase :Union[str, Any] = (EsmForProteinFolding,) if is_torch_available() else ()
_UpperCAmelCase :Union[str, Any] = ()
_UpperCAmelCase :List[Any] = {} if is_torch_available() else {}
_UpperCAmelCase :Optional[Any] = False
def _snake_case ( self ):
lowercase__: int = EsmFoldModelTester(self )
lowercase__: List[str] = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 )
def _snake_case ( self ):
self.config_tester.run_common_tests()
def _snake_case ( self ):
lowercase__: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
@unittest.skip('''Does not support attention outputs''' )
def _snake_case ( self ):
pass
@unittest.skip
def _snake_case ( self ):
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def _snake_case ( self ):
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def _snake_case ( self ):
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def _snake_case ( self ):
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def _snake_case ( self ):
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def _snake_case ( self ):
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def _snake_case ( self ):
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def _snake_case ( self ):
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def _snake_case ( self ):
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def _snake_case ( self ):
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def _snake_case ( self ):
pass
@unittest.skip('''ESMFold only has one output format.''' )
def _snake_case ( self ):
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def _snake_case ( self ):
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def _snake_case ( self ):
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def _snake_case ( self ):
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def _snake_case ( self ):
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def _snake_case ( self ):
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def _snake_case ( self ):
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def _snake_case ( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _snake_case ( self ):
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
"""simple docstring"""
@slow
def _snake_case ( self ):
lowercase__: Dict = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float()
model.eval()
lowercase__: int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowercase__: List[str] = model(UpperCAmelCase__ )['''positions''']
lowercase__: List[Any] = torch.tensor([2.5_828, 0.7_993, -10.9_334] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , UpperCAmelCase__ , atol=1e-4 ) )
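# Shape note (a hedged reading of the assertions above): the "positions"
# output is (8, batch, seq_len, 14, 3); the trailing dimensions are the up to
# 14 atoms of the atom14 residue encoding, each with (x, y, z) coordinates,
# while the leading 8 appears to index intermediate structure-module
# iterations.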
| 177 |
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__snake_case ="""\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
__snake_case ="""\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
__snake_case ="""
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """
    Computes F1 score and Exact Match for MultiRC predictions.
    """
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 4 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
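

# Hedged usage sketch for the fast tokenizer above (the checkpoint id is the
# one published in the maps at the top of this file; the exact ids produced
# are an assumption, not exercised here):
#
#   tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#   ids = tok(" Hello world")["input_ids"]
#   # build_inputs_with_special_tokens appends tok.eos_token_id, so ids[-1]
#   # is the </s> token.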
| 217 |
'''simple docstring'''
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
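# Why the line above reproduces itself: "%r" splices in repr() of the string
# (quotes included) and "%%" collapses to a single "%", so `quine % quine`
# prints the same lambda applied to the same literal again.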
| 4 | 0 |
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json',
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs
        )
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
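

# Minimal usage sketch for the config above; "gated-gelu" is the legacy
# spelling that __init__ rewrites for backward compatibility:
#
#   config = T5Config(feed_forward_proj="gated-gelu")
#   assert config.dense_act_fn == "gelu_new" and config.is_gated_act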
| 332 |
'''simple docstring'''
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}


def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
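# Worked example: parse_roman_numerals("XIX") == 19 -- the I before the final
# X is smaller than its successor, so it is subtracted (10 - 1 + 10).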
def generate_roman_numerals(num: int) -> str:
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
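# Round-trip sketch for the two helpers ("MCCCCCCVI" is the classic
# inefficient spelling from the problem statement):
#
#   parse_roman_numerals("MCCCCCCVI")  # -> 1606
#   generate_roman_numerals(1606)      # -> "MDCVI", four characters shorter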
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)
    return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 4 | 0 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
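# e.g. floats_list((2, 3)) returns a 2x3 nested list of floats in [0, scale),
# drawn from the shared module-level RNG above when no explicit rng is given.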
class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
| 260 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 4 | 0 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)
LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class FlaxLogitsProcessor:
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for processing logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for warping logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
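

# A processor list is itself callable; hedged composition sketch using the two
# warpers defined just below (shapes and the cur_len value are illustrative):
#
#   processors = FlaxLogitsProcessorList()
#   processors.append(FlaxTemperatureLogitsWarper(0.7))
#   processors.append(FlaxTopPLogitsWarper(top_p=0.9))
#   scores = processors(input_ids, scores, cur_len)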
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
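

# Worked trace of the nucleus cut above with top_p = 0.9 and four sorted
# probabilities 0.5, 0.3, 0.15, 0.05 (illustrative): cumsum = [0.5, 0.8, 0.95,
# 1.0], the raw mask is [T, T, F, F], and the roll plus the forced first
# column keeps [T, T, T, F] -- the token that crosses the threshold survives,
# everything after it gets `filter_value`.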
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
        return scores


class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
        return scores


class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores


class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
| 73 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class UpperCAmelCase_ ( unittest.TestCase ):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 4 | 0 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """
    Cleans the table of content of the model documentation by removing duplicates and sorting models alphabetically.
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
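

# e.g. (hypothetical toc entries) two sections pointing at the same "local"
# with one title collapse to a single entry and the result is re-sorted:
#
#   clean_model_doc_toc([
#       {"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/albert", "title": "ALBERT"},
#   ])
#   # -> [{"local": "model_doc/albert", "title": "ALBERT"},
#   #     {"local": "model_doc/bert", "title": "BERT"}]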
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
| 252 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
| 4 | 0 |
"""simple docstring"""
def solution(n: int = 1000) -> int:
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
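# Worked check: 144 = F(12) is the first Fibonacci term with three digits,
# so solution(3) returns 12 (the two seed terms 1, 1 occupy indices 1 and 2).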
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 148 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes

    # Initialize remaining_time to waiting_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0

    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.

    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
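

# Hand trace for TEST CASE 01 below: with every arrival at t=0 the shortest
# remaining burst always wins, so the run order is P1(2), P3(3), P2(5), P4(7)
# and the waiting times come out as [0, 5, 2, 10].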
if __name__ == "__main__":
print("""[TEST CASE 01]""")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
F'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(F'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(F'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 4 | 0 |
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
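

# list_field exists because dataclasses reject mutable defaults; the
# default_factory indirection makes e.g. `foo_int: List[int] = list_field(default=[1, 2, 3])`
# legal where a bare `= [1, 2, 3]` would raise at class-definition time.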
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self , __a , __a ):
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
__lowerCAmelCase = {k: v for k, v in vars(UpperCAmelCase__ ).items() if k != "container"}
__lowerCAmelCase = {k: v for k, v in vars(UpperCAmelCase__ ).items() if k != "container"}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("choices" , UpperCAmelCase__ ) and yy.get("choices" , UpperCAmelCase__ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["type"](UpperCAmelCase__ ) , yy["type"](UpperCAmelCase__ ) )
del xx["type"], yy["type"]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def snake_case ( self ):
__lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
__lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument("--foo" , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
expected.add_argument("--bar" , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
expected.add_argument("--baz" , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
expected.add_argument("--flag" , type=UpperCAmelCase__ , default=UpperCAmelCase__ , const=UpperCAmelCase__ , nargs="?" )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
__lowerCAmelCase = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
((__lowerCAmelCase ) , ) = parser.parse_args_into_dataclasses(UpperCAmelCase__ , look_for_args_file=UpperCAmelCase__ )
self.assertFalse(example.flag )
def snake_case ( self ):
__lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
__lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument("--foo" , default=42 , type=UpperCAmelCase__ )
expected.add_argument("--baz" , default="toto" , type=UpperCAmelCase__ , help="help message" )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def snake_case ( self ):
__lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument("--foo" , type=UpperCAmelCase__ , default=UpperCAmelCase__ , const=UpperCAmelCase__ , nargs="?" )
expected.add_argument("--baz" , type=UpperCAmelCase__ , default=UpperCAmelCase__ , const=UpperCAmelCase__ , nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counterpart
# and its default must be set to False
expected.add_argument("--no_baz" , action="store_false" , default=UpperCAmelCase__ , dest="baz" )
expected.add_argument("--opt" , type=UpperCAmelCase__ , default=UpperCAmelCase__ )
__lowerCAmelCase = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(UpperCAmelCase__ )
for dataclass_type in dataclass_types:
__lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
__lowerCAmelCase = parser.parse_args([] )
self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
__lowerCAmelCase = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
__lowerCAmelCase = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
__lowerCAmelCase = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
__lowerCAmelCase = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
def snake_case ( self ):
__lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
__lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
__lowerCAmelCase = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
__lowerCAmelCase = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
__lowerCAmelCase = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
__lowerCAmelCase = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
__lowerCAmelCase = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
__lowerCAmelCase = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def snake_case ( self ):
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCAmelCase : Literal["titi", "toto", 4_2] ="toto"
__lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
__lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
__lowerCAmelCase = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
__lowerCAmelCase = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
__lowerCAmelCase = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
def snake_case ( self ):
__lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
__lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument("--foo_int" , nargs="+" , default=[] , type=UpperCAmelCase__ )
expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=UpperCAmelCase__ )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=UpperCAmelCase__ )
expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase__ )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
__lowerCAmelCase = parser.parse_args([] )
self.assertEqual(
UpperCAmelCase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )
__lowerCAmelCase = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(UpperCAmelCase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )
def snake_case ( self ):
__lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument("--foo" , default=UpperCAmelCase__ , type=UpperCAmelCase__ )
expected.add_argument("--bar" , default=UpperCAmelCase__ , type=UpperCAmelCase__ , help="help message" )
expected.add_argument("--baz" , default=UpperCAmelCase__ , type=UpperCAmelCase__ )
expected.add_argument("--ces" , nargs="+" , default=[] , type=UpperCAmelCase__ )
expected.add_argument("--des" , nargs="+" , default=[] , type=UpperCAmelCase__ )
__lowerCAmelCase = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(UpperCAmelCase__ )
for dataclass_type in dataclass_types:
__lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
__lowerCAmelCase = parser.parse_args([] )
self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , bar=UpperCAmelCase__ , baz=UpperCAmelCase__ , ces=[] , des=[] ) )
__lowerCAmelCase = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(UpperCAmelCase__ , Namespace(foo=12 , bar=3.1_4 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )
def snake_case ( self ):
__lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
__lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument("--required_list" , nargs="+" , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
expected.add_argument("--required_str" , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=UpperCAmelCase__ , )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def snake_case ( self ):
__lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
__lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument("--foo" , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=UpperCAmelCase__ , )
expected.add_argument("--opt" , type=UpperCAmelCase__ , default=UpperCAmelCase__ )
expected.add_argument("--baz" , default="toto" , type=UpperCAmelCase__ , help="help message" )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=UpperCAmelCase__ )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def snake_case ( self ):
__lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
__lowerCAmelCase = {
"foo": 12,
"bar": 3.1_4,
"baz": "42",
"flag": True,
}
__lowerCAmelCase = parser.parse_dict(UpperCAmelCase__ )[0]
__lowerCAmelCase = BasicExample(**UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def snake_case ( self ):
__lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
__lowerCAmelCase = {
"foo": 12,
"bar": 3.1_4,
"baz": "42",
"flag": True,
"extra": 42,
}
self.assertRaises(UpperCAmelCase__ , parser.parse_dict , UpperCAmelCase__ , allow_extra_keys=UpperCAmelCase__ )
def snake_case ( self ):
__lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
__lowerCAmelCase = {
"foo": 12,
"bar": 3.1_4,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCAmelCase = os.path.join(UpperCAmelCase__ , "temp_json" )
os.mkdir(UpperCAmelCase__ )
with open(temp_local_path + ".json" , "w+" ) as f:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
__lowerCAmelCase = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0]
__lowerCAmelCase = BasicExample(**UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def snake_case ( self ):
__lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
__lowerCAmelCase = {
"foo": 12,
"bar": 3.1_4,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCAmelCase = os.path.join(UpperCAmelCase__ , "temp_yaml" )
os.mkdir(UpperCAmelCase__ )
with open(temp_local_path + ".yaml" , "w+" ) as f:
yaml.dump(UpperCAmelCase__ , UpperCAmelCase__ )
__lowerCAmelCase = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
__lowerCAmelCase = BasicExample(**UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def snake_case ( self ):
__lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
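# Standalone usage sketch for HfArgumentParser, the API exercised by the test
# suite above: dataclass fields become CLI flags automatically. The dataclass
# and its field names here are hypothetical.
from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class TrainConfig:
    learning_rate: float = field(default=3e-4, metadata={"help": "peak LR"})
    epochs: int = 3
    run_name: str = "baseline"

parser = HfArgumentParser(TrainConfig)
(cfg,) = parser.parse_args_into_dataclasses(["--learning_rate", "1e-4", "--epochs", "5"])
assert cfg.learning_rate == 1e-4 and cfg.epochs == 5 and cfg.run_name == "baseline"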
| 57 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : Optional[int] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Tuple ) -> Any:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_euler' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self : List[str] ) -> Dict:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_euler' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_dpmpp_2m' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=1_5 , output_type='np' , use_karras_sigmas=UpperCAmelCase__ , )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array(
[0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
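# The slice-based assertions above compare a tiny corner of the generated
# image against hard-coded reference values. A standalone sketch of the same
# pattern with a dummy array (no GPU or model download needed):
import numpy as np

image = np.zeros((1, 512, 512, 3), dtype=np.float32)  # stand-in for pipe output
image_slice = image[0, -3:, -3:, -1]                   # 3x3 corner, last channel
expected_slice = np.zeros(9, dtype=np.float32)         # reference values
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2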
| 4 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
lowerCAmelCase__ = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler("sample_euler" )
lowerCAmelCase__ = "A painting of a squirrel eating a burger"
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase__ = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
lowerCAmelCase__ = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler("sample_euler" )
lowerCAmelCase__ = "A painting of a squirrel eating a burger"
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase__ = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
lowerCAmelCase__ = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler("sample_dpmpp_2m" )
lowerCAmelCase__ = "A painting of a squirrel eating a burger"
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=15 , output_type="np" , use_karras_sigmas=UpperCAmelCase__ , )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase__ = np.array(
[0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
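# `use_karras_sigmas=True` above requests the noise schedule from Karras et
# al. 2022 ("Elucidating the Design Space of Diffusion-Based Generative
# Models"). A minimal sketch of that schedule; the sigma_min/sigma_max
# values here are illustrative, not the pipeline's actual ones.
import numpy as np

def karras_sigmas(n, sigma_min=0.1, sigma_max=10.0, rho=7.0):
    ramp = np.linspace(0, 1, n)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

sigmas = karras_sigmas(15)
assert np.isclose(sigmas[0], 10.0) and np.isclose(sigmas[-1], 0.1)
assert np.all(np.diff(sigmas) < 0)  # strictly decreasing schedule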
| 340 |
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def a_ ( lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any]=0 ):
# Format the message.
if name is None:
lowerCAmelCase = None
else:
lowerCAmelCase = '.' * max(0 , spaces - 2 ) + '# {:' + str(50 - spaces ) + 's}'
lowerCAmelCase = fmt.format(lowerCamelCase )
# Print and recurse (if needed).
if isinstance(lowerCamelCase , lowerCamelCase ):
if msg is not None:
print(lowerCamelCase )
for k in val.keys():
recursive_print(lowerCamelCase , val[k] , spaces + 2 )
elif isinstance(lowerCamelCase , torch.Tensor ):
print(lowerCamelCase , ':' , val.size() )
else:
print(lowerCamelCase , ':' , lowerCamelCase )
def a_ ( lowerCamelCase : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Dict , lowerCamelCase : Tuple ):
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
lowerCAmelCase = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
lowerCAmelCase = (num_heads, hidden_size, num_splits) + input_shape[1:]
lowerCAmelCase = param.view(*lowerCamelCase )
lowerCAmelCase = param.transpose(0 , 2 )
lowerCAmelCase = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
lowerCAmelCase = (num_heads, num_splits, hidden_size) + input_shape[1:]
lowerCAmelCase = param.view(*lowerCamelCase )
lowerCAmelCase = param.transpose(0 , 1 ).contiguous()
lowerCAmelCase = param.view(*lowerCamelCase )
return param
def a_ ( lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : str ):
# The converted output model.
lowerCAmelCase = {}
# old versions did not store training args
lowerCAmelCase = input_state_dict.get('args' , lowerCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
lowerCAmelCase = ds_args.padded_vocab_size
lowerCAmelCase = ds_args.max_position_embeddings
lowerCAmelCase = ds_args.hidden_size
lowerCAmelCase = ds_args.num_layers
lowerCAmelCase = ds_args.num_attention_heads
lowerCAmelCase = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
lowerCAmelCase = config.n_head
# The hidden_size per head.
lowerCAmelCase = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
lowerCAmelCase = input_state_dict['checkpoint_version']
else:
lowerCAmelCase = 0.0
# The model.
lowerCAmelCase = input_state_dict['model']
# The language model.
lowerCAmelCase = model['language_model']
# The embeddings.
lowerCAmelCase = lm['embedding']
# The word embeddings.
lowerCAmelCase = embeddings['word_embeddings']['weight']
# Truncate the embedding table to vocab_size rows.
lowerCAmelCase = word_embeddings[: config.vocab_size, :]
lowerCAmelCase = word_embeddings
# The position embeddings.
lowerCAmelCase = embeddings['position_embeddings']['weight']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
lowerCAmelCase = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
lowerCAmelCase = pos_embeddings
# The transformer.
lowerCAmelCase = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder']
# The regex to extract layer names.
lowerCAmelCase = re.compile(R'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' )
# The simple map of names for "automated" rules.
lowerCAmelCase = {
'attention.dense': '.attn.c_proj.',
'self_attention.dense': '.attn.c_proj.',
'mlp.dense_h_to_4h': '.mlp.c_fc.',
'mlp.dense_4h_to_h': '.mlp.c_proj.',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
lowerCAmelCase = layer_re.match(lowerCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
lowerCAmelCase = int(m.group(1 ) )
# The name of the operation.
lowerCAmelCase = m.group(2 )
# Is it a weight or a bias?
lowerCAmelCase = m.group(3 )
# The name of the layer.
lowerCAmelCase = f'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith('layernorm' ):
lowerCAmelCase = 'ln_1' if op_name.startswith('input' ) else 'ln_2'
lowerCAmelCase = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
lowerCAmelCase = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , lowerCamelCase , lowerCamelCase )
lowerCAmelCase = causal_mask
# Insert a "dummy" tensor for masked_bias.
lowerCAmelCase = torch.tensor(-1e4 , dtype=torch.floataa )
lowerCAmelCase = masked_bias
lowerCAmelCase = fix_query_key_value_ordering(lowerCamelCase , lowerCamelCase , 3 , lowerCamelCase , lowerCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
lowerCAmelCase = out_val.transpose(0 , 1 ).contiguous()
# Store.
lowerCAmelCase = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
lowerCAmelCase = fix_query_key_value_ordering(lowerCamelCase , lowerCamelCase , 3 , lowerCamelCase , lowerCamelCase )
# Store. No change of shape.
lowerCAmelCase = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
lowerCAmelCase = megatron_to_transformers[op_name]
lowerCAmelCase = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
lowerCAmelCase = megatron_to_transformers[op_name]
lowerCAmelCase = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
lowerCAmelCase = transformer['final_layernorm.weight']
lowerCAmelCase = transformer['final_layernorm.bias']
# For the LM head, transformers expects the weight matrix to be tied to the word embeddings.
lowerCAmelCase = word_embeddings
# It should be done!
return output_state_dict
def a_ ( ):
# Create the argument parser.
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('--print-checkpoint-structure' , action='store_true' )
parser.add_argument(
'path_to_checkpoint' , type=lowerCamelCase , help='Path to the checkpoint file (.zip archive or direct .pt file)' , )
parser.add_argument(
'--config_file' , default='' , type=lowerCamelCase , help='An optional config json file describing the pre-trained model.' , )
lowerCAmelCase = parser.parse_args()
# Extract the basename.
lowerCAmelCase = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith('.zip' ):
with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint:
with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict:
lowerCAmelCase = torch.load(lowerCamelCase , map_location='cpu' )
else:
lowerCAmelCase = torch.load(args.path_to_checkpoint , map_location='cpu' )
lowerCAmelCase = input_state_dict.get('args' , lowerCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
lowerCAmelCase = 'gelu_fast'
elif ds_args.openai_gelu:
lowerCAmelCase = 'gelu_new'
else:
lowerCAmelCase = 'gelu'
else:
# in the very early days this used to be "gelu_new"
lowerCAmelCase = 'gelu_new'
# Spell out all parameters in case the defaults change.
lowerCAmelCase = GPTaConfig(
vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=lowerCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='cls_index' , summary_use_proj=lowerCamelCase , summary_activation=lowerCamelCase , summary_proj_to_labels=lowerCamelCase , summary_first_dropout=0.1 , scale_attn_weights=lowerCamelCase , use_cache=lowerCamelCase , bos_token_id=50256 , eos_token_id=50256 , )
else:
lowerCAmelCase = GPTaConfig.from_json_file(args.config_file )
lowerCAmelCase = ['GPT2LMHeadModel']
# Convert.
print('Converting' )
lowerCAmelCase = convert_megatron_checkpoint(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(lowerCamelCase , lowerCamelCase )
# Add tokenizer class info to config (see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
lowerCAmelCase = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
lowerCAmelCase = 'gpt2'
elif tokenizer_type == "PretrainedFromHF":
lowerCAmelCase = ds_args.tokenizer_name_or_path
else:
raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
lowerCAmelCase = 'gpt2'
lowerCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase )
lowerCAmelCase = type(lowerCamelCase ).__name__
lowerCAmelCase = tokenizer_class
# Store the config to file.
print('Saving config' )
config.save_pretrained(lowerCamelCase )
# Save tokenizer based on args
print(f'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(lowerCamelCase )
# Store the state_dict to file.
lowerCAmelCase = os.path.join(lowerCamelCase , 'pytorch_model.bin' )
print(f'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(lowerCamelCase , lowerCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
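# Standalone check of the checkpoint_version >= 2.0 branch of the QKV
# reordering above: rows stored in [head, split, hidden] order are permuted
# to [split, head, hidden] order (tiny illustrative sizes, dummy values).
import torch

num_heads, num_splits, hidden_per_head = 2, 3, 4
rows = num_heads * num_splits * hidden_per_head
param = torch.arange(rows, dtype=torch.float32).unsqueeze(1)  # shape [24, 1]

out = (
    param.view(num_heads, num_splits, hidden_per_head, -1)
    .transpose(0, 1)
    .contiguous()
    .view(rows, -1)
)
# Output rows 0..3 come from head 0 / split 0, rows 4..7 from head 1 / split 0, ...
assert out[:4, 0].tolist() == [0.0, 1.0, 2.0, 3.0]
assert out[4:8, 0].tolist() == [12.0, 13.0, 14.0, 15.0]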
| 4 | 0 |
def a__ ( A_, A_, A_, A_ ):
'''simple docstring'''
if height >= 1:
move_tower(height - 1, A_, A_, A_ )
move_disk(A_, A_ )
move_tower(height - 1, A_, A_, A_ )
def a__ ( A_, A_ ):
'''simple docstring'''
print("""moving disk from""", A_, """to""", A_ )
def a__ ( ):
'''simple docstring'''
__magic_name__ = int(input("""Height of hanoi: """ ).strip() )
move_tower(A_, """A""", """B""", """C""" )
if __name__ == "__main__":
main()
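# Quick property check for the recursion above: moving a tower of height n
# always takes exactly 2**n - 1 disk moves (one move plus two subtowers).
def count_moves(height: int) -> int:
    if height < 1:
        return 0
    return count_moves(height - 1) + 1 + count_moves(height - 1)

assert [count_moves(n) for n in range(1, 6)] == [1, 3, 7, 15, 31]  # 2**n - 1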
| 88 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class UpperCAmelCase_ :
def __init__( self : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 0 ) -> None:
lowerCAmelCase , lowerCAmelCase = row, column
lowerCAmelCase = [[default_value for c in range(UpperCAmelCase__ )] for r in range(UpperCAmelCase__ )]
def __str__( self : List[str] ) -> str:
lowerCAmelCase = F'''Matrix consists of {self.row} rows and {self.column} columns\n'''
# Make string identifier
lowerCAmelCase = 0
for row_vector in self.array:
for obj in row_vector:
lowerCAmelCase = max(UpperCAmelCase__ , len(str(UpperCAmelCase__ ) ) )
lowerCAmelCase = F'''%{max_element_length}s'''
# Make string and return
def single_line(UpperCAmelCase__ : list[float] ) -> str:
nonlocal string_format_identifier
lowerCAmelCase = '['
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(UpperCAmelCase__ ) for row_vector in self.array )
return s
def __repr__( self : List[str] ) -> str:
return str(self )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : tuple[int, int] ) -> bool:
if not (isinstance(UpperCAmelCase__ , (list, tuple) ) and len(UpperCAmelCase__ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Any , UpperCAmelCase__ : tuple[int, int] ) -> Any:
assert self.validate_indicies(UpperCAmelCase__ )
return self.array[loc[0]][loc[1]]
def __setitem__( self : Dict , UpperCAmelCase__ : tuple[int, int] , UpperCAmelCase__ : float ) -> None:
assert self.validate_indicies(UpperCAmelCase__ )
lowerCAmelCase = value
def __add__( self : Any , UpperCAmelCase__ : Matrix ) -> Matrix:
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert self.row == another.row and self.column == another.column
# Add
lowerCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = self[r, c] + another[r, c]
return result
def __neg__( self : int ) -> Matrix:
lowerCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = -self[r, c]
return result
def __sub__( self : str , UpperCAmelCase__ : Matrix ) -> Matrix:
return self + (-another)
def __mul__( self : str , UpperCAmelCase__ : int | float | Matrix ) -> Matrix:
if isinstance(UpperCAmelCase__ , (int, float) ): # Scalar multiplication
lowerCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = self[r, c] * another
return result
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): # Matrix multiplication
assert self.column == another.row
lowerCAmelCase = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
lowerCAmelCase = F'''Unsupported type given for another ({type(UpperCAmelCase__ )})'''
raise TypeError(UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[Any] ) -> Matrix:
lowerCAmelCase = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = self[r, c]
return result
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : Matrix , UpperCAmelCase__ : Matrix ) -> Any:
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
lowerCAmelCase = v.transpose()
lowerCAmelCase = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def a_ ( ):
# a^(-1)
lowerCAmelCase = Matrix(3 , 3 , 0 )
for i in range(3 ):
lowerCAmelCase = 1
print(f'''a^(-1) is {ainv}''' )
# u, v
lowerCAmelCase = Matrix(3 , 1 , 0 )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1, 2, -3
lowerCAmelCase = Matrix(3 , 1 , 0 )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 4, -2, 5
print(f'''u is {u}''' )
print(f'''v is {v}''' )
print(f'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(lowerCamelCase , lowerCamelCase )}''' )
def a_ ( ):
import doctest
doctest.testmod()
testa()
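# NumPy sanity check of the Sherman-Morrison identity the class method above
# relies on:  (A + u v^T)^-1 = A^-1 - (A^-1 u v^T A^-1) / (1 + v^T A^-1 u).
import numpy as np

rng = np.random.default_rng(0)
a = np.eye(3) + 0.1 * rng.standard_normal((3, 3))
u = rng.standard_normal((3, 1))
v = rng.standard_normal((3, 1))

ainv = np.linalg.inv(a)
denom = 1.0 + (v.T @ ainv @ u).item()
sm = ainv - (ainv @ u @ v.T @ ainv) / denom
assert np.allclose(sm, np.linalg.inv(a + u @ v.T))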
| 4 | 0 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def lowerCAmelCase_ ( __UpperCAmelCase: Tuple , __UpperCAmelCase: str , __UpperCAmelCase: Tuple , __UpperCAmelCase: int ) -> List[Any]:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
UpperCamelCase__ : str = np.full((len(__UpperCAmelCase ), sequence_length, 2) , __UpperCAmelCase )
else:
UpperCamelCase__ : Optional[Any] = np.full((len(__UpperCAmelCase ), sequence_length) , __UpperCAmelCase )
for i, tensor in enumerate(__UpperCAmelCase ):
if padding_side == "right":
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
UpperCamelCase__ : Optional[Any] = tensor[:sequence_length]
else:
UpperCamelCase__ : Optional[int] = tensor[:sequence_length]
else:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
UpperCamelCase__ : int = tensor[:sequence_length]
else:
UpperCamelCase__ : Union[str, Any] = tensor[:sequence_length]
return out_tensor.tolist()
def lowerCAmelCase_ ( __UpperCAmelCase: Optional[int] ) -> List[str]:
UpperCamelCase__ : List[Any] = ord(__UpperCAmelCase )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
UpperCamelCase__ : Any = unicodedata.category(__UpperCAmelCase )
if cat.startswith('''P''' ):
return True
return False
@dataclass
class lowercase__ ( __lowercase ):
'''simple docstring'''
a : PreTrainedTokenizerBase
a : Union[bool, str, PaddingStrategy] = True
a : Optional[int] = None
a : Optional[int] = None
a : int = -100
a : str = "pt"
def UpperCamelCase__ ( self, __magic_name__ ) -> List[Any]:
"""simple docstring"""
import torch
UpperCamelCase__ : int = '''label''' if '''label''' in features[0].keys() else '''labels'''
UpperCamelCase__ : List[str] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
UpperCamelCase__ : Union[str, Any] = self.tokenizer.pad(
UpperCAmelCase__, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='''pt''' if labels is None else None, )
if labels is None:
return batch
UpperCamelCase__ : Any = torch.tensor(batch['''entity_ids'''] ).shape[1]
UpperCamelCase__ : Optional[Any] = self.tokenizer.padding_side
if padding_side == "right":
UpperCamelCase__ : Optional[Any] = [
list(UpperCAmelCase__ ) + [self.label_pad_token_id] * (sequence_length - len(UpperCAmelCase__ )) for label in labels
]
else:
UpperCamelCase__ : Union[str, Any] = [
[self.label_pad_token_id] * (sequence_length - len(UpperCAmelCase__ )) + list(UpperCAmelCase__ ) for label in labels
]
UpperCamelCase__ : List[str] = [feature['''ner_tags'''] for feature in features]
UpperCamelCase__ : str = padding_tensor(UpperCAmelCase__, -1, UpperCAmelCase__, UpperCAmelCase__ )
UpperCamelCase__ : Dict = [feature['''original_entity_spans'''] for feature in features]
UpperCamelCase__ : List[str] = padding_tensor(UpperCAmelCase__, (-1, -1), UpperCAmelCase__, UpperCAmelCase__ )
UpperCamelCase__ : List[Any] = {k: torch.tensor(UpperCAmelCase__, dtype=torch.intaa ) for k, v in batch.items()}
return batch
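# Standalone sketch of the right-padding performed by `padding_tensor` above:
# shorter sequences are copied into a fixed-length buffer filled with a pad
# value (-100 matches the collator's label_pad_token_id default).
import numpy as np

def pad_right(sequences, fill, length):
    out = np.full((len(sequences), length), fill)
    for i, seq in enumerate(sequences):
        seq = seq[:length]          # truncate anything too long
        out[i, : len(seq)] = seq    # left-align, pad the rest
    return out.tolist()

assert pad_right([[1, 2], [3, 4, 5]], fill=-100, length=4) == [
    [1, 2, -100, -100],
    [3, 4, 5, -100],
]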
| 201 |
'''simple docstring'''
class UpperCAmelCase_ :
def __init__( self : List[str] , UpperCAmelCase__ : list[int] ) -> None:
lowerCAmelCase = len(UpperCAmelCase__ )
lowerCAmelCase = [0] * len_array
if len_array > 0:
lowerCAmelCase = array[0]
for i in range(1 , UpperCAmelCase__ ):
lowerCAmelCase = self.prefix_sum[i - 1] + array[i]
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : int ) -> bool:
lowerCAmelCase = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(UpperCAmelCase__ )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
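# Why the set-based check above works: a subarray i..j sums to `target`
# exactly when prefix[j] - prefix[i-1] == target, so while scanning prefix
# sums it suffices to remember all earlier prefix values (0 for the empty
# prefix) and look for `current - target`. Self-contained restatement:
def has_subarray_sum(array, target):
    seen = {0}
    running = 0
    for x in array:
        running += x
        if running - target in seen:
            return True
        seen.add(running)
    return False

assert has_subarray_sum([1, 2, 3, 4], 9)        # 2 + 3 + 4
assert not has_subarray_sum([1, 2, 3, 4], 11)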
| 4 | 0 |
"""simple docstring"""
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=0 ) -> List[Any]:
# Format the message.
if name is None:
lowercase__: Any = None
else:
lowercase__: Optional[Any] = '''.''' * max(0 , spaces - 2 ) + '''# {:''' + str(5_0 - spaces ) + '''s}'''
lowercase__: List[str] = fmt.format(__UpperCAmelCase )
# Print and recurse (if needed).
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
if msg is not None:
print(__UpperCAmelCase )
for k in val.keys():
recursive_print(__UpperCAmelCase , val[k] , spaces + 2 )
elif isinstance(__UpperCAmelCase , torch.Tensor ):
print(__UpperCAmelCase , ''':''' , val.size() )
else:
print(__UpperCAmelCase , ''':''' , __UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
lowercase__: str = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
lowercase__: List[str] = (num_heads, hidden_size, num_splits) + input_shape[1:]
lowercase__: Optional[Any] = param.view(*__UpperCAmelCase )
lowercase__: str = param.transpose(0 , 2 )
lowercase__: Union[str, Any] = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
lowercase__: int = (num_heads, num_splits, hidden_size) + input_shape[1:]
lowercase__: Optional[int] = param.view(*__UpperCAmelCase )
lowercase__: Dict = param.transpose(0 , 1 ).contiguous()
lowercase__: Union[str, Any] = param.view(*__UpperCAmelCase )
return param
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
# The converted output model.
lowercase__: int = {}
# old versions did not store training args
lowercase__: int = input_state_dict.get('''args''' , __UpperCAmelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
lowercase__: List[str] = ds_args.padded_vocab_size
lowercase__: int = ds_args.max_position_embeddings
lowercase__: int = ds_args.hidden_size
lowercase__: Optional[Any] = ds_args.num_layers
lowercase__: int = ds_args.num_attention_heads
lowercase__: Optional[Any] = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
lowercase__: List[Any] = config.n_head
# The hidden_size per head.
lowercase__: List[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
lowercase__: int = input_state_dict['''checkpoint_version''']
else:
lowercase__: List[Any] = 0.0
# The model.
lowercase__: Any = input_state_dict['''model''']
# The language model.
lowercase__: Dict = model['''language_model''']
# The embeddings.
lowercase__: str = lm['''embedding''']
# The word embeddings.
lowercase__: Any = embeddings['''word_embeddings''']['''weight''']
# Truncate the embedding table to vocab_size rows.
lowercase__: List[Any] = word_embeddings[: config.vocab_size, :]
lowercase__: List[str] = word_embeddings
# The position embeddings.
lowercase__: Dict = embeddings['''position_embeddings''']['''weight''']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
lowercase__: int = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F"""pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match""" )
# Store the position embeddings.
lowercase__: Optional[Any] = pos_embeddings
# The transformer.
lowercase__: Any = lm['''transformer'''] if '''transformer''' in lm.keys() else lm['''encoder''']
# The regex to extract layer names.
lowercase__: Optional[int] = re.compile(R'''layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)''' )
# The simple map of names for "automated" rules.
lowercase__: Tuple = {
'''attention.dense''': '''.attn.c_proj.''',
'''self_attention.dense''': '''.attn.c_proj.''',
'''mlp.dense_h_to_4h''': '''.mlp.c_fc.''',
'''mlp.dense_4h_to_h''': '''.mlp.c_proj.''',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
lowercase__: List[Any] = layer_re.match(__UpperCAmelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
lowercase__: List[Any] = int(m.group(1 ) )
# The name of the operation.
lowercase__: Optional[int] = m.group(2 )
# Is it a weight or a bias?
lowercase__: Optional[int] = m.group(3 )
# The name of the layer.
lowercase__: List[Any] = F"""transformer.h.{layer_idx}"""
# For layernorm(s), simply store the layer norm.
if op_name.endswith('''layernorm''' ):
lowercase__: Optional[int] = '''ln_1''' if op_name.startswith('''input''' ) else '''ln_2'''
lowercase__: int = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
lowercase__: int = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , __UpperCAmelCase , __UpperCAmelCase )
lowercase__: List[str] = causal_mask
# Insert a "dummy" tensor for masked_bias.
lowercase__: List[str] = torch.tensor(-1e4 , dtype=torch.floataa )
lowercase__: int = masked_bias
lowercase__: Optional[int] = fix_query_key_value_ordering(__UpperCAmelCase , __UpperCAmelCase , 3 , __UpperCAmelCase , __UpperCAmelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
lowercase__: str = out_val.transpose(0 , 1 ).contiguous()
# Store.
lowercase__: Union[str, Any] = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
lowercase__: List[str] = fix_query_key_value_ordering(__UpperCAmelCase , __UpperCAmelCase , 3 , __UpperCAmelCase , __UpperCAmelCase )
# Store. No change of shape.
lowercase__: Tuple = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
lowercase__: Any = megatron_to_transformers[op_name]
lowercase__: Union[str, Any] = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
lowercase__: Optional[int] = megatron_to_transformers[op_name]
lowercase__: Any = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
lowercase__: str = transformer['''final_layernorm.weight''']
lowercase__: str = transformer['''final_layernorm.bias''']
# For the LM head, transformers expects the weight matrix to be tied to the word embeddings.
lowercase__: List[str] = word_embeddings
# It should be done!
return output_state_dict
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
# Create the argument parser.
lowercase__: Dict = argparse.ArgumentParser()
parser.add_argument('''--print-checkpoint-structure''' , action='''store_true''' )
parser.add_argument(
'''path_to_checkpoint''' , type=__UpperCAmelCase , help='''Path to the checkpoint file (.zip archive or direct .pt file)''' , )
parser.add_argument(
'''--config_file''' , default='''''' , type=__UpperCAmelCase , help='''An optional config json file describing the pre-trained model.''' , )
lowercase__: str = parser.parse_args()
# Extract the basename.
lowercase__: int = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F"""Extracting PyTorch state dictionary from {args.path_to_checkpoint}""" )
if args.path_to_checkpoint.endswith('''.zip''' ):
with zipfile.ZipFile(args.path_to_checkpoint , '''r''' ) as checkpoint:
with checkpoint.open('''release/mp_rank_00/model_optim_rng.pt''' ) as pytorch_dict:
lowercase__: Any = torch.load(__UpperCAmelCase , map_location='''cpu''' )
else:
lowercase__: List[str] = torch.load(args.path_to_checkpoint , map_location='''cpu''' )
lowercase__: Optional[int] = input_state_dict.get('''args''' , __UpperCAmelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
lowercase__: Optional[Any] = '''gelu_fast'''
elif ds_args.openai_gelu:
lowercase__: Optional[Any] = '''gelu_new'''
else:
lowercase__: Dict = '''gelu'''
else:
# in the very early days this used to be "gelu_new"
lowercase__: Optional[Any] = '''gelu_new'''
# Spell out all parameters in case the defaults change.
lowercase__: Dict = GPTaConfig(
vocab_size=5_0_2_5_7 , n_positions=1_0_2_4 , n_embd=1_0_2_4 , n_layer=2_4 , n_head=1_6 , n_inner=4_0_9_6 , activation_function=__UpperCAmelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.0_2 , summary_type='''cls_index''' , summary_use_proj=__UpperCAmelCase , summary_activation=__UpperCAmelCase , summary_proj_to_labels=__UpperCAmelCase , summary_first_dropout=0.1 , scale_attn_weights=__UpperCAmelCase , use_cache=__UpperCAmelCase , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , )
else:
lowercase__: Any = GPTaConfig.from_json_file(args.config_file )
lowercase__: List[str] = ['''GPT2LMHeadModel''']
# Convert.
print('''Converting''' )
lowercase__: Tuple = convert_megatron_checkpoint(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(__UpperCAmelCase , __UpperCAmelCase )
# Add tokenizer class info to config (see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
lowercase__: Dict = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
lowercase__: List[Any] = '''gpt2'''
elif tokenizer_type == "PretrainedFromHF":
lowercase__: Optional[Any] = ds_args.tokenizer_name_or_path
else:
raise ValueError(F"""Unrecognized tokenizer_type {tokenizer_type}""" )
else:
lowercase__: Dict = '''gpt2'''
lowercase__: Optional[int] = AutoTokenizer.from_pretrained(__UpperCAmelCase )
lowercase__: str = type(__UpperCAmelCase ).__name__
lowercase__: Dict = tokenizer_class
# Store the config to file.
print('''Saving config''' )
config.save_pretrained(__UpperCAmelCase )
# Save tokenizer based on args
print(F"""Adding {tokenizer_class} tokenizer files""" )
tokenizer.save_pretrained(__UpperCAmelCase )
# Store the state_dict to file.
lowercase__: str = os.path.join(__UpperCAmelCase , '''pytorch_model.bin''' )
print(F"""Saving checkpoint to \"{output_checkpoint_file}\"""" )
torch.save(__UpperCAmelCase , __UpperCAmelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
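# Standalone sketch of what the recursive state-dict printer above produces
# for a tiny nested dict (self-contained re-implementation, dummy tensors):
import torch

def print_state(name, val, indent=0):
    pad = " " * indent
    if isinstance(val, dict):
        if name is not None:
            print(pad + name)
        for k, v in val.items():
            print_state(k, v, indent + 2)
    elif isinstance(val, torch.Tensor):
        print(pad + name, ":", tuple(val.size()))
    else:
        print(pad + name, ":", val)

print_state(None, {"layer": {"weight": torch.zeros(2, 3), "bias": torch.zeros(2)}})
# layer
#   weight : (2, 3)
#   bias : (2,)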
| 177 |
'''simple docstring'''
def a_ ( lowerCamelCase : Optional[Any] ):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def a_ ( lowerCamelCase : dict[int, list[int]] ):
lowerCAmelCase = 0
lowerCAmelCase = len(lowerCamelCase ) # No of vertices in graph
lowerCAmelCase = [0] * n
lowerCAmelCase = [False] * n
def dfs(lowerCamelCase : Tuple , lowerCamelCase : str , lowerCamelCase : Dict , lowerCamelCase : str ):
lowerCAmelCase = True
lowerCAmelCase = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(lowerCamelCase , lowerCamelCase , lowerCamelCase , id_ )
lowerCAmelCase = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowerCAmelCase = min(low[at] , low[to] )
lowerCAmelCase = []
for i in range(lowerCamelCase ):
if not visited[i]:
dfs(lowerCamelCase , -1 , lowerCamelCase , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
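# The low-link test above marks edge (at, to) as a bridge exactly when
# low[to] > disc[at]: no back edge from to's subtree climbs past `at`.
# Quick sanity check, calling the bridge finder above under the hypothetical
# name `compute_bridges`: a cycle has no bridges; a pendant edge is one.
cycle = {0: [1, 4], 1: [0, 2], 2: [1, 3], 3: [2, 4], 4: [3, 0]}
assert compute_bridges(cycle) == []

pendant = {0: [1, 4, 5], 1: [0, 2], 2: [1, 3], 3: [2, 4], 4: [3, 0], 5: [0]}
assert compute_bridges(pendant) == [(0, 5)]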
| 4 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class snake_case ( __lowercase, unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[str] = RoCBertTokenizer
SCREAMING_SNAKE_CASE_ : Tuple = None
SCREAMING_SNAKE_CASE_ : int = False
SCREAMING_SNAKE_CASE_ : Tuple = True
SCREAMING_SNAKE_CASE_ : int = filter_non_english
def lowercase_ ( self : Dict)-> Dict:
'''simple docstring'''
super().setUp()
__lowerCAmelCase: str = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
__lowerCAmelCase: Tuple = {}
__lowerCAmelCase: Optional[Any] = {}
for i, value in enumerate(UpperCAmelCase__):
__lowerCAmelCase: int = i
__lowerCAmelCase: Dict = i
__lowerCAmelCase: Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
__lowerCAmelCase: Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"])
__lowerCAmelCase: int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
with open(self.word_shape_file , "w" , encoding="utf-8") as word_shape_writer:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__)
with open(self.word_pronunciation_file , "w" , encoding="utf-8") as word_pronunciation_writer:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__)
def lowercase_ ( self : Dict)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Dict = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
__lowerCAmelCase: Optional[Any] = tokenizer.tokenize("你好[SEP]你是谁")
self.assertListEqual(UpperCAmelCase__ , ["你", "好", "[SEP]", "你", "是", "谁"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__) , [5, 6, 2, 5, 7, 8])
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase__) , [5, 6, 2, 5, 7, 8])
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase__) , [5, 6, 2, 5, 7, 8])
def lowercase_ ( self : Any)-> str:
'''simple docstring'''
__lowerCAmelCase: Tuple = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz") , ["ah", "\u535A", "\u63A8", "zz"])
def lowercase_ ( self : Optional[Any])-> str:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? ") , ["hello", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
def lowercase_ ( self : List[Any])-> Any:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hällo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["h\u00E9llo"])
def lowercase_ ( self : List[str])-> List[str]:
'''simple docstring'''
__lowerCAmelCase: Any = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
def lowercase_ ( self : str)-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: Tuple = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
def lowercase_ ( self : List[str])-> List[str]:
'''simple docstring'''
__lowerCAmelCase: List[str] = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? ") , ["HeLLo", "!", "how", "Are", "yoU", "?"])
def lowercase_ ( self : Any)-> str:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HäLLo", "!", "how", "Are", "yoU", "?"])
def lowercase_ ( self : List[Any])-> Any:
'''simple docstring'''
__lowerCAmelCase: Dict = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HaLLo", "!", "how", "Are", "yoU", "?"])
def lowercase_ ( self : List[str])-> List[str]:
'''simple docstring'''
__lowerCAmelCase: int = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]") , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
def lowercase_ ( self : str)-> List[str]:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
__lowerCAmelCase: Optional[int] = {}
for i, token in enumerate(UpperCAmelCase__):
__lowerCAmelCase: Union[str, Any] = i
__lowerCAmelCase: Dict = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase__ , unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("") , [])
self.assertListEqual(tokenizer.tokenize("unwanted running") , ["un", "##want", "##ed", "runn", "##ing"])
self.assertListEqual(tokenizer.tokenize("unwantedX running") , ["[UNK]", "runn", "##ing"])
def lowercase_ ( self : Any)-> Tuple:
'''simple docstring'''
self.assertTrue(_is_whitespace(" "))
self.assertTrue(_is_whitespace("\t"))
self.assertTrue(_is_whitespace("\r"))
self.assertTrue(_is_whitespace("\n"))
self.assertTrue(_is_whitespace("\u00A0"))
self.assertFalse(_is_whitespace("A"))
self.assertFalse(_is_whitespace("-"))
def lowercase_ ( self : List[Any])-> List[str]:
'''simple docstring'''
self.assertTrue(_is_control("\u0005"))
self.assertFalse(_is_control("A"))
self.assertFalse(_is_control(" "))
self.assertFalse(_is_control("\t"))
self.assertFalse(_is_control("\r"))
def lowercase_ ( self : Optional[int])-> List[Any]:
'''simple docstring'''
self.assertTrue(_is_punctuation("-"))
self.assertTrue(_is_punctuation("$"))
self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation("."))
self.assertFalse(_is_punctuation("A"))
self.assertFalse(_is_punctuation(" "))
def lowercase_ ( self : Union[str, Any])-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: Tuple = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]])
if self.test_rust_tokenizer:
__lowerCAmelCase: List[str] = self.get_rust_tokenizer()
self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]])
def lowercase_ ( self : Dict)-> Tuple:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                sequence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sequence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), "Allen"),
((2_1, 2_3), "##NL"),
((2_3, 2_4), "##P"),
((2_5, 3_3), "sentence"),
((3_3, 3_4), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), "allen"),
((2_1, 2_3), "##nl"),
((2_3, 2_4), "##p"),
((2_5, 3_3), "sentence"),
((3_3, 3_4), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"])
def lowercase_ ( self : str)-> Tuple:
'''simple docstring'''
__lowerCAmelCase: Any = ["的", "人", "有"]
__lowerCAmelCase: Optional[Any] = "".join(UpperCAmelCase__)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
__lowerCAmelCase: Optional[Any] = True
__lowerCAmelCase: Optional[int] = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__)
__lowerCAmelCase: Tuple = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__)
__lowerCAmelCase: int = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__)
__lowerCAmelCase: Tuple = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__)
__lowerCAmelCase: Any = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__)
__lowerCAmelCase: List[Any] = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__)
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__)
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__)
__lowerCAmelCase: Dict = False
__lowerCAmelCase: List[Any] = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__)
__lowerCAmelCase: Any = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__)
__lowerCAmelCase: Dict = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__)
__lowerCAmelCase: Optional[int] = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__)
__lowerCAmelCase: List[str] = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__)
__lowerCAmelCase: Tuple = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__)
# it is expected that only the first Chinese character is not preceded by "##".
__lowerCAmelCase: int = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase__)
]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__)
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__)
@slow
def lowercase_ ( self : List[str])-> Optional[int]:
'''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
        text = tokenizer.encode("你好" , add_special_tokens=False)
        text_a = tokenizer.encode("你是谁" , add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a)
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def lowercase_ ( self : str)-> Optional[Any]:
'''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                text = "你好,你是谁"
                tokens = tokenizer.tokenize(text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                pronunciation_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    ids , shape_ids , pronunciation_ids , add_special_tokens=True)
                input_dict = tokenizer.encode_plus(text , add_special_tokens=True)
                self.assertEqual(input_dict , prepared_input_dict)
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys ( state_dict ):
lowerCAmelCase = OrderedDict()
for key, value in state_dict.items():
if key.startswith('module.encoder' ):
lowerCAmelCase = key.replace('module.encoder' , 'glpn.encoder' )
if key.startswith('module.decoder' ):
lowerCAmelCase = key.replace('module.decoder' , 'decoder.stages' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase = key[key.find('patch_embed' ) + len('patch_embed' )]
lowerCAmelCase = key.replace(f'''patch_embed{idx}''' , f'''patch_embeddings.{int(lowerCamelCase )-1}''' )
if "norm" in key:
lowerCAmelCase = key.replace('norm' , 'layer_norm' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
lowerCAmelCase = key.replace(f'''layer_norm{idx}''' , f'''layer_norm.{int(lowerCamelCase )-1}''' )
if "layer_norm1" in key:
lowerCAmelCase = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
lowerCAmelCase = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase = key[key.find('block' ) + len('block' )]
lowerCAmelCase = key.replace(f'''block{idx}''' , f'''block.{int(lowerCamelCase )-1}''' )
if "attn.q" in key:
lowerCAmelCase = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
lowerCAmelCase = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
lowerCAmelCase = key.replace('attn' , 'attention.self' )
if "fc1" in key:
lowerCAmelCase = key.replace('fc1' , 'dense1' )
if "fc2" in key:
lowerCAmelCase = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
lowerCAmelCase = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
lowerCAmelCase = key.replace('linear_fuse.conv' , 'linear_fuse' )
lowerCAmelCase = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase = key[key.find('linear_c' ) + len('linear_c' )]
lowerCAmelCase = key.replace(f'''linear_c{idx}''' , f'''linear_c.{int(lowerCamelCase )-1}''' )
if "bot_conv" in key:
lowerCAmelCase = key.replace('bot_conv' , '0.convolution' )
if "skip_conv1" in key:
lowerCAmelCase = key.replace('skip_conv1' , '1.convolution' )
if "skip_conv2" in key:
lowerCAmelCase = key.replace('skip_conv2' , '2.convolution' )
if "fusion1" in key:
lowerCAmelCase = key.replace('fusion1' , '1.fusion' )
if "fusion2" in key:
lowerCAmelCase = key.replace('fusion2' , '2.fusion' )
if "fusion3" in key:
lowerCAmelCase = key.replace('fusion3' , '3.fusion' )
if "fusion" in key and "conv" in key:
lowerCAmelCase = key.replace('conv' , 'convolutional_layer' )
if key.startswith('module.last_layer_depth' ):
lowerCAmelCase = key.replace('module.last_layer_depth' , 'head.head' )
lowerCAmelCase = value
return new_state_dict
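# Illustrative sketch (not part of the original script): the block above maps
# one-based suffixes like "patch_embed1" to zero-based HF module names. A minimal
# standalone demonstration of the same indexing rule, assuming single-digit suffixes:
def _demo_patch_embed_rename(key: str) -> str:
    idx = key[key.find("patch_embed") + len("patch_embed")]
    return key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx) - 1}")
# _demo_patch_embed_rename("module.patch_embed1.proj") == "module.patch_embeddings.0.proj"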
def read_in_k_v ( state_dict , config ):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
lowerCAmelCase = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase = kv_bias[config.hidden_sizes[i] :]
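# Minimal sketch (an assumption that mirrors the slicing above): a fused key/value
# projection of shape (2 * hidden, hidden) splits into a key half and a value half.
def _demo_split_kv(kv_weight: torch.Tensor, hidden_size: int):
    k_weight = kv_weight[:hidden_size, :]  # first half -> key projection
    v_weight = kv_weight[hidden_size:, :]  # second half -> value projection
    return k_weight, v_weight
# _demo_split_kv(torch.zeros(8, 4), 4) -> two (4, 4) tensors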
def prepare_img ( ):
lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return image
@torch.no_grad()
def convert_glpn_checkpoint ( checkpoint_path , pytorch_dump_folder_path , push_to_hub=False , model_name=None ):
lowerCAmelCase = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
lowerCAmelCase = GLPNImageProcessor()
# prepare image
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=lowerCamelCase , return_tensors='pt' ).pixel_values
logger.info('Converting model...' )
# load original state dict
lowerCAmelCase = torch.load(lowerCamelCase , map_location=torch.device('cpu' ) )
# rename keys
lowerCAmelCase = rename_keys(lowerCamelCase )
# key and value matrices need special treatment
read_in_k_v(lowerCamelCase , lowerCamelCase )
# create HuggingFace model and load state dict
lowerCAmelCase = GLPNForDepthEstimation(lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
# forward pass
lowerCAmelCase = model(lowerCamelCase )
lowerCAmelCase = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
lowerCAmelCase = torch.tensor(
[[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
elif "kitti" in model_name:
lowerCAmelCase = torch.tensor(
[[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
else:
raise ValueError(f'''Unknown model name: {model_name}''' )
lowerCAmelCase = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , lowerCamelCase , atol=1e-4 )
print('Looks ok!' )
# finally, push to hub if required
if push_to_hub:
logger.info('Pushing model and image processor to the hub...' )
model.push_to_hub(
repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCamelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
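# Example invocation (illustrative only; the script name and paths are placeholders):
#   python convert_glpn_to_pytorch.py \
#       --checkpoint_path /path/to/glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti \
#       --model_name glpn-kitti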
"""simple docstring"""
import numpy as np
def lowercase__ ( vector: np.ndarray , alpha: float ):
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
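# Worked example (illustrative, not from the original file): with alpha = 1.0 the
# function above is the standard ELU, so exp(-1) - 1 ≈ -0.6321 for input -1.0,
# while non-negative inputs pass through unchanged.
if __name__ == "__main__":
    print(lowercase__(np.array([-1.0, 0.0, 2.0]), 1.0))  # ≈ [-0.6321  0.      2.    ]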
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
    @slow
    def test_xlm_roberta_base( self ) -> None:
        model = XLMRobertaModel.from_pretrained('xlm-roberta-base' )
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )['last_hidden_state'].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )

    @slow
    def test_xlm_roberta_large( self ) -> None:
        model = XLMRobertaModel.from_pretrained('xlm-roberta-large' )
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )['last_hidden_state'].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester ( unittest.TestCase):
    """simple docstring"""

    def __init__( self , parent , vocab_size=100 , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
def lowercase__ ( self : List[Any] )->List[str]:
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def lowercase__ ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int )->str:
_UpperCAmelCase = FlaxBeitModel(config=UpperCAmelCase__ )
_UpperCAmelCase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : str , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] )->Any:
_UpperCAmelCase = FlaxBeitForMaskedImageModeling(config=UpperCAmelCase__ )
_UpperCAmelCase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def lowercase__ ( self : str , __UpperCamelCase : Dict , __UpperCamelCase : Dict , __UpperCamelCase : str )->Optional[int]:
_UpperCAmelCase = self.type_sequence_label_size
_UpperCAmelCase = FlaxBeitForImageClassification(config=UpperCAmelCase__ )
_UpperCAmelCase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCAmelCase = 1
_UpperCAmelCase = FlaxBeitForImageClassification(UpperCAmelCase__ )
_UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase = model(UpperCAmelCase__ )
def lowercase__ ( self : Any )->str:
        config_and_inputs = self.prepare_config_and_inputs()
        ( config , pixel_values , labels ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class FlaxBeitModelTest ( FlaxModelTesterMixin , unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp( self ) -> None:
        self.model_tester = FlaxBeitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BeitConfig , has_text_modality=False , hidden_size=37 )
def lowercase__ ( self : List[Any] )->List[Any]:
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[int] )->Tuple:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(UpperCAmelCase__ )
_UpperCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def lowercase__ ( self : Any )->Union[str, Any]:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
_UpperCAmelCase = model_class(UpperCAmelCase__ )
@jax.jit
def model_jitted(__UpperCamelCase : Dict , **__UpperCamelCase : Optional[int] ):
return model(pixel_values=UpperCAmelCase__ , **UpperCAmelCase__ )
with self.subTest('''JIT Enabled''' ):
_UpperCAmelCase = model_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_UpperCAmelCase = model_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
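    # Note (illustrative): jax.jit traces the call once and compiles it with XLA, so
    # the check above asserts that compiled and eager execution (jax.disable_jit())
    # return tuples of matching length with element-wise matching output shapes.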
def lowercase__ ( self : Dict )->Optional[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def lowercase__ ( self : Optional[int] )->str:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def lowercase__ ( self : List[str] )->Dict:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )
@slow
def lowercase__ ( self : str )->Optional[Any]:
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained('''microsoft/beit-base-patch16-224''' )
_UpperCAmelCase = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
self.assertIsNotNone(UpperCAmelCase__ )
def prepare_img ( ):
'''simple docstring'''
_UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest ( unittest.TestCase):
"""simple docstring"""
@cached_property
def lowercase__ ( self : str )->str:
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def lowercase__ ( self : Dict )->int:
_UpperCAmelCase = FlaxBeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=UpperCAmelCase__ , return_tensors='''np''' ).pixel_values
# prepare bool_masked_pos
_UpperCAmelCase = np.ones((1, 1_9_6) , dtype=UpperCAmelCase__ )
# forward pass
_UpperCAmelCase = model(pixel_values=UpperCAmelCase__ , bool_masked_pos=UpperCAmelCase__ )
_UpperCAmelCase = outputs.logits
# verify the logits
_UpperCAmelCase = (1, 1_9_6, 8_1_9_2)
self.assertEqual(logits.shape , UpperCAmelCase__ )
_UpperCAmelCase = np.array(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , UpperCAmelCase__ , atol=1e-2 ) )
@slow
def lowercase__ ( self : Any )->List[str]:
_UpperCAmelCase = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=UpperCAmelCase__ , return_tensors='''np''' )
# forward pass
_UpperCAmelCase = model(**UpperCAmelCase__ )
_UpperCAmelCase = outputs.logits
# verify the logits
_UpperCAmelCase = (1, 1_0_0_0)
self.assertEqual(logits.shape , UpperCAmelCase__ )
_UpperCAmelCase = np.array([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] )
self.assertTrue(np.allclose(logits[0, :3] , UpperCAmelCase__ , atol=1e-4 ) )
_UpperCAmelCase = 2_8_1
self.assertEqual(logits.argmax(-1 ).item() , UpperCAmelCase__ )
@slow
def lowercase__ ( self : int )->str:
_UpperCAmelCase = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=UpperCAmelCase__ , return_tensors='''np''' )
# forward pass
_UpperCAmelCase = model(**UpperCAmelCase__ )
_UpperCAmelCase = outputs.logits
# verify the logits
_UpperCAmelCase = (1, 2_1_8_4_1)
self.assertEqual(logits.shape , UpperCAmelCase__ )
_UpperCAmelCase = np.array([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] )
self.assertTrue(np.allclose(logits[0, :3] , UpperCAmelCase__ , atol=1e-4 ) )
_UpperCAmelCase = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item() , UpperCAmelCase__ )
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize ( example ):
lowerCAmelCase = {}
lowerCAmelCase = tokenizer(example['content'] , truncation=lowerCamelCase )['input_ids']
lowerCAmelCase = len(example['content'] ) / len(output['input_ids'] )
return output
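# Illustrative sketch (hypothetical numbers, not from the original script): the
# ratio computed above is average characters per token, e.g. 20 characters
# tokenized into 5 ids gives a ratio of 4.0.
def _demo_chars_per_token(num_chars: int = 20, num_tokens: int = 5) -> float:
    return num_chars / num_tokens  # -> 4.0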
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split="""train""")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)
class FlavaFeatureExtractor ( FlavaImageProcessor ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case =logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments ( TrainingArguments ):
    sortish_sampler : bool = field(default=False , metadata={'help': 'Whether to use SortishSampler or not.'} )
    predict_with_generate : bool = field(
        default=False , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
    generation_max_length : Optional[int] = field(
        default=None , metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        } , )
    generation_num_beams : Optional[int] = field(
        default=None , metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        } , )
    generation_config : Optional[Union[str, Path, GenerationConfig]] = field(
        default=None , metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        } , )

    def to_dict( self ) -> dict:
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig ):
                d[k] = v.to_dict()
        return d
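# Illustrative usage (an assumption, not part of the original file): a nested
# GenerationConfig value is flattened to a plain dict by to_dict, keeping the
# arguments JSON-serializable.
#
#   args = Seq2SeqTrainingArguments(output_dir="out", predict_with_generate=True)
#   isinstance(args.to_dict(), dict)  # -> True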
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
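# Minimal sketch (an assumption) of the deferral idea behind _LazyModule: attribute
# access triggers the submodule import, instead of paying the full import cost when
# the package itself is first imported.
import importlib

def _demo_lazy_attr(module_name: str, attr: str):
    # the real class also caches modules; this only shows the deferred lookup
    return getattr(importlib.import_module(module_name), attr)
# _demo_lazy_attr("transformers.models.xlm.tokenization_xlm", "XLMTokenizer")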
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("""transformers.models.encodec""")
MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively ( hf_pointer , key , value , full_name , weight_type ):
for attribute in key.split('.' ):
lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
if weight_type is not None:
lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase ).shape
else:
lowerCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowerCAmelCase = value
elif weight_type == "weight_g":
lowerCAmelCase = value
elif weight_type == "weight_v":
lowerCAmelCase = value
elif weight_type == "bias":
lowerCAmelCase = value
elif weight_type == "running_mean":
lowerCAmelCase = value
elif weight_type == "running_var":
lowerCAmelCase = value
elif weight_type == "num_batches_tracked":
lowerCAmelCase = value
elif weight_type == "weight_ih_l0":
lowerCAmelCase = value
elif weight_type == "weight_hh_l0":
lowerCAmelCase = value
elif weight_type == "bias_ih_l0":
lowerCAmelCase = value
elif weight_type == "bias_hh_l0":
lowerCAmelCase = value
elif weight_type == "weight_ih_l1":
lowerCAmelCase = value
elif weight_type == "weight_hh_l1":
lowerCAmelCase = value
elif weight_type == "bias_ih_l1":
lowerCAmelCase = value
elif weight_type == "bias_hh_l1":
lowerCAmelCase = value
else:
lowerCAmelCase = value
logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def should_ignore ( name , ignore_keys ):
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
lowerCAmelCase , lowerCAmelCase = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
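# Minimal sketch (hypothetical names): a "prefix.*.suffix" pattern in IGNORE_KEYS
# matches any weight whose name contains both the prefix and the suffix.
def _demo_wildcard_match(name: str, pattern: str) -> bool:
    prefix, suffix = pattern.split(".*.")
    return prefix in name and suffix in name
# _demo_wildcard_match("encoder.layers.0.conv", "encoder.*.conv") -> True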
def recursively_load_weights ( orig_dict , hf_model , model_name ):
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(lowerCamelCase , lowerCamelCase ):
logger.info(f'''{name} was ignored''' )
continue
lowerCAmelCase = False
for key, mapped_key in MAPPING.items():
if "*" in key:
lowerCAmelCase , lowerCAmelCase = key.split('.*.' )
if prefix in name and suffix in name:
lowerCAmelCase = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
lowerCAmelCase = True
if "*" in mapped_key:
lowerCAmelCase = name.split(lowerCamelCase )[0].split('.' )[-2]
lowerCAmelCase = mapped_key.replace('*' , lowerCamelCase )
if "weight_g" in name:
lowerCAmelCase = 'weight_g'
elif "weight_v" in name:
lowerCAmelCase = 'weight_v'
elif "weight_ih_l0" in name:
lowerCAmelCase = 'weight_ih_l0'
elif "weight_hh_l0" in name:
lowerCAmelCase = 'weight_hh_l0'
elif "bias_ih_l0" in name:
lowerCAmelCase = 'bias_ih_l0'
elif "bias_hh_l0" in name:
lowerCAmelCase = 'bias_hh_l0'
elif "weight_ih_l1" in name:
lowerCAmelCase = 'weight_ih_l1'
elif "weight_hh_l1" in name:
lowerCAmelCase = 'weight_hh_l1'
elif "bias_ih_l1" in name:
lowerCAmelCase = 'bias_ih_l1'
elif "bias_hh_l1" in name:
lowerCAmelCase = 'bias_hh_l1'
elif "bias" in name:
lowerCAmelCase = 'bias'
elif "weight" in name:
lowerCAmelCase = 'weight'
elif "running_mean" in name:
lowerCAmelCase = 'running_mean'
elif "running_var" in name:
lowerCAmelCase = 'running_var'
elif "num_batches_tracked" in name:
lowerCAmelCase = 'num_batches_tracked'
else:
lowerCAmelCase = None
set_recursively(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
continue
if not is_used:
unused_weights.append(lowerCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def convert_checkpoint ( model_name , checkpoint_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
if config_path is not None:
lowerCAmelCase = EncodecConfig.from_pretrained(lowerCamelCase )
else:
lowerCAmelCase = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
lowerCAmelCase = [8, 5, 4, 4]
lowerCAmelCase = [2.2]
lowerCAmelCase = 64
lowerCAmelCase = 32000
lowerCAmelCase = 2048
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
elif model_name == "encodec_48khz":
lowerCAmelCase = [8, 5, 4, 2]
lowerCAmelCase = [3.0, 6.0, 12.0, 24.0]
lowerCAmelCase = 48000
lowerCAmelCase = 2
lowerCAmelCase = False
lowerCAmelCase = 'time_group_norm'
lowerCAmelCase = True
lowerCAmelCase = 1.0
lowerCAmelCase = 0.01
else:
raise ValueError(f'''Unknown model name: {model_name}''' )
lowerCAmelCase = EncodecModel(lowerCamelCase )
lowerCAmelCase = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(lowerCamelCase )
lowerCAmelCase = torch.load(lowerCamelCase )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
lowerCAmelCase = original_checkpoint['best_state']
recursively_load_weights(lowerCamelCase , lowerCamelCase , lowerCamelCase )
model.save_pretrained(lowerCamelCase )
if repo_id:
print('Pushing to the hub...' )
feature_extractor.push_to_hub(lowerCamelCase )
model.push_to_hub(lowerCamelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys ( config , base_model=False ):
snake_case : Tuple = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
snake_case : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
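# Illustrative sketch (hypothetical pair, not from the original script): the list
# built above holds (old_name, new_name) pairs that rename_key later applies to
# the timm state dict.
def _demo_apply_renames(state_dict: dict, renames: list) -> dict:
    out = dict(state_dict)
    for old_name, new_name in renames:
        out[new_name] = out.pop(old_name)
    return out
# _demo_apply_renames({"cls_token": 0}, [("cls_token", "deit.embeddings.cls_token")])
# -> {"deit.embeddings.cls_token": 0}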
def read_in_q_k_v ( state_dict , config , base_model=False ):
for i in range(config.num_hidden_layers ):
if base_model:
snake_case : Optional[int] = ""
else:
snake_case : Tuple = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case : List[Any] = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
snake_case : Union[str, Any] = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
snake_case : Optional[int] = in_proj_weight[
: config.hidden_size, :
]
snake_case : List[str] = in_proj_bias[: config.hidden_size]
snake_case : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
snake_case : List[str] = in_proj_bias[-config.hidden_size :]
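# Minimal sketch (an assumption that mirrors the slicing above): timm stores Q, K
# and V stacked along dim 0 of a single (3 * hidden, hidden) matrix; the three
# equal slices become the separate HF query/key/value projections.
def _demo_split_qkv(in_proj_weight: torch.Tensor, hidden_size: int):
    q = in_proj_weight[:hidden_size, :]
    k = in_proj_weight[hidden_size : hidden_size * 2, :]
    v = in_proj_weight[-hidden_size:, :]
    return q, k, v
# _demo_split_qkv(torch.zeros(12, 4), 4) -> three (4, 4) tensors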
def rename_key ( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img ( ):
snake_case : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
snake_case : int = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
@torch.no_grad()
def convert_deit_checkpoint ( deit_name , pytorch_dump_folder_path ):
snake_case : List[str] = DeiTConfig()
# all deit models have fine-tuned heads
snake_case : List[str] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
snake_case : Any = 1000
snake_case : Optional[int] = "huggingface/label-files"
snake_case : List[str] = "imagenet-1k-id2label.json"
snake_case : Any = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="dataset" ) , "r" ) )
snake_case : List[Any] = {int(lowercase__ ): v for k, v in idalabel.items()}
snake_case : str = idalabel
snake_case : List[str] = {v: k for k, v in idalabel.items()}
snake_case : List[Any] = int(deit_name[-6:-4] )
snake_case : Union[str, Any] = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
snake_case : Optional[Any] = 192
snake_case : Union[str, Any] = 768
snake_case : List[str] = 12
snake_case : str = 3
elif deit_name[9:].startswith("small" ):
snake_case : Optional[Any] = 384
snake_case : int = 1536
snake_case : int = 12
snake_case : str = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
snake_case : Any = 1024
snake_case : Tuple = 4096
snake_case : int = 24
snake_case : int = 16
# load original model from timm
snake_case : Union[str, Any] = timm.create_model(lowercase__ , pretrained=lowercase__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case : Tuple = timm_model.state_dict()
snake_case : Dict = create_rename_keys(lowercase__ , lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
read_in_q_k_v(lowercase__ , lowercase__ , lowercase__ )
# load HuggingFace model
snake_case : int = DeiTForImageClassificationWithTeacher(lowercase__ ).eval()
model.load_state_dict(lowercase__ )
# Check outputs on an image, prepared by DeiTImageProcessor
snake_case : Union[str, Any] = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
snake_case : Optional[Any] = DeiTImageProcessor(size=lowercase__ , crop_size=config.image_size )
snake_case : List[str] = image_processor(images=prepare_img() , return_tensors="pt" )
snake_case : List[str] = encoding["pixel_values"]
snake_case : Tuple = model(lowercase__ )
snake_case : List[str] = timm_model(lowercase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase__ , outputs.logits , atol=1E-3 )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)
class FlavaFeatureExtractor ( FlavaImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
A : List[Any] = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests (
    PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"""token_indices"""} )
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def snake_case ( cls ):
super().setUpClass()
torch.use_deterministic_algorithms(UpperCAmelCase__ )
@classmethod
def snake_case ( cls ):
super().tearDownClass()
torch.use_deterministic_algorithms(UpperCAmelCase__ )
def snake_case ( self ):
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase__ , )
__lowerCAmelCase = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase__ , set_alpha_to_one=UpperCAmelCase__ , )
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
__lowerCAmelCase = CLIPTextModel(UpperCAmelCase__ )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__lowerCAmelCase = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"prompt": "a cat and a frog",
"token_indices": [2, 5],
"generator": generator,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
"max_iter_to_alter": 2,
"thresholds": {0: 0.7},
}
return inputs
def snake_case ( self ):
__lowerCAmelCase = "cpu"
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowerCAmelCase = self.get_dummy_inputs(UpperCAmelCase__ )
__lowerCAmelCase = pipe(**UpperCAmelCase__ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
__lowerCAmelCase = np.array(
[0.6_3_9_0_5_3_6_4, 0.6_2_8_9_7_3_0_7, 0.4_8_5_9_9_0_1_7, 0.5_1_3_3_6_2_4, 0.5_5_5_0_0_4_8, 0.4_5_7_6_9_5_1_6, 0.5_0_3_2_6_9_7_3, 0.5_0_2_3_1_3_9, 0.4_5_3_8_4_4_9_6] )
__lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase__ , 1e-3 )
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
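# Minimal usage sketch (an illustration, not part of the original test suite;
# it reuses the helper names defined in the fast-test class above):
#
#     components = self.get_dummy_components()
#     pipe = StableDiffusionAttendAndExcitePipeline(**components)
#     pipe.set_progress_bar_config(disable=None)
#     image = pipe(**self.get_dummy_inputs("cpu")).images[0]  # (64, 64, 3) ndarray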
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16)
        pipe.to("cuda")
        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy", ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy")
        assert np.abs((expected_image - image).max()) < 5e-1
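    # Illustrative note (not part of the original file): the prompt tokenizes as
    # [<bos>, a, painting, of, an, elephant, with, glasses, <eos>], so
    # token_indices=[5, 7] target "elephant" and "glasses".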
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__(self, vocab_file=None, merges_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            ByteLevelBPETokenizer(vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets), bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs, )
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
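# Worked example (illustrative, not from the original file): with token_ids_0=[5, 6]
# and token_ids_1=[7], build_inputs_with_special_tokens returns
# [bos, 5, 6, eos, eos, 7, eos]: each sequence is wrapped in bos/eos and pairs are
# joined by a double eos. The matching create_token_type_ids_from_sequences output
# is a list of seven zeros, since BlenderbotSmall does not use token type ids.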