| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 87-55.2k | int64 0-349 | stringlengths 135-49.1k | int64 0-349 | int64 0-1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""facebook/levit-128S""": """https://huggingface.co/facebook/levit-128S/resolve/main/config.json""",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Tuple = '''levit'''
def __init__( self : Tuple , UpperCAmelCase__ : Optional[Any]=2_2_4 , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : List[Any]=3 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : Any=1 , UpperCAmelCase__ : Dict=1_6 , UpperCAmelCase__ : int=[1_2_8, 2_5_6, 3_8_4] , UpperCAmelCase__ : str=[4, 8, 1_2] , UpperCAmelCase__ : Optional[int]=[4, 4, 4] , UpperCAmelCase__ : Optional[Any]=[1_6, 1_6, 1_6] , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : Dict=[2, 2, 2] , UpperCAmelCase__ : List[str]=[2, 2, 2] , UpperCAmelCase__ : Dict=0.02 , **UpperCAmelCase__ : Optional[int] , ) -> Optional[Any]:
super().__init__(**UpperCAmelCase__ )
lowerCAmelCase = image_size
lowerCAmelCase = num_channels
lowerCAmelCase = kernel_size
lowerCAmelCase = stride
lowerCAmelCase = padding
lowerCAmelCase = hidden_sizes
lowerCAmelCase = num_attention_heads
lowerCAmelCase = depths
lowerCAmelCase = key_dim
lowerCAmelCase = drop_path_rate
lowerCAmelCase = patch_size
lowerCAmelCase = attention_ratio
lowerCAmelCase = mlp_ratio
lowerCAmelCase = initializer_range
lowerCAmelCase = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : int = version.parse('''1.11''' )
@property
def __UpperCAmelCase ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __UpperCAmelCase ( self : Dict ) -> float:
return 1E-4
| 4 |
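A minimal usage sketch for the configuration above, assuming the transformers library is installed; LevitConfig is the upstream class this mangled snippet corresponds to, and the attribute name down_ops is taken from that upstream class (an assumption, not visible in the snippet). The printed value follows from the subsample list built at the end of the constructor:
from transformers import LevitConfig

config = LevitConfig(hidden_sizes=[128, 256, 384], key_dim=[16, 16, 16])
# Each entry is ['Subsample', key_dim, hidden_size // key_dim, attn_ratio, mlp_ratio, stride]
print(config.down_ops[0])  # ['Subsample', 16, 8, 4, 2, 2]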
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case =logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : bool = field(default=__lowercase , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
lowerCamelCase : bool = field(
default=__lowercase , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
lowerCamelCase : Optional[int] = field(
default=__lowercase , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
lowerCamelCase : Optional[int] = field(
default=__lowercase , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
lowerCamelCase : Optional[Union[str, Path, GenerationConfig]] = field(
default=__lowercase , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
lowerCAmelCase = super().to_dict()
for k, v in d.items():
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase = v.to_dict()
return d
| 4 | 1 |
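The to_dict override above exists so that a nested GenerationConfig serializes as a plain dict. A hedged sketch of that behaviour, assuming a transformers release where Seq2SeqTrainingArguments accepts a generation_config argument; "out" is a hypothetical output directory:
from transformers import GenerationConfig, Seq2SeqTrainingArguments

args = Seq2SeqTrainingArguments(
    output_dir="out",  # hypothetical path
    predict_with_generate=True,
    generation_config=GenerationConfig(num_beams=4),
)
d = args.to_dict()
print(type(d["generation_config"]))  # <class 'dict'>, JSON-serializable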
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def a_ ( lowerCamelCase : str , lowerCamelCase : List[Any] , lowerCamelCase : str=None ):
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
lowerCAmelCase = nn.Parameter(lowerCamelCase )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
lowerCAmelCase = nn.Parameter(lowerCamelCase )
def a_ ( lowerCamelCase : Tuple , lowerCamelCase : str , lowerCamelCase : int ):
# set torch weights for 1-to-1 comparison
lowerCAmelCase = np.asarray(weights[0] )
lowerCAmelCase = np.asarray(weights[1] )
lowerCAmelCase = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(lowerCamelCase ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCamelCase ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCamelCase ).view(-1 , lowerCamelCase ).contiguous().transpose(0 , 1 ) , )
def a_ ( lowerCamelCase : Any , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int] ):
# set torch weights for 1-to-1 comparison
lowerCAmelCase = np.asarray(weights[0] )
lowerCAmelCase = np.asarray(weights[1] )
lowerCAmelCase = np.asarray(weights[2] )
lowerCAmelCase = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(lowerCamelCase ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(lowerCamelCase ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCamelCase ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCamelCase ).view(-1 , lowerCamelCase ).contiguous().transpose(0 , 1 ) , )
def a_ ( lowerCamelCase : Dict , lowerCamelCase : Optional[int] , lowerCamelCase : Dict ):
# layernorm 1
lowerCAmelCase = weights[0][0][0]
lowerCAmelCase = np.asarray(layer_norm_a[0] )
lowerCAmelCase = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(lowerCamelCase ) , torch.tensor(lowerCamelCase ) , )
# lsh weights + output
lowerCAmelCase = weights[0][1]
if len(lowerCamelCase ) < 4:
set_layer_weights_in_torch_lsh(lowerCamelCase , torch_block.attention , lowerCamelCase )
else:
set_layer_weights_in_torch_local(lowerCamelCase , torch_block.attention , lowerCamelCase )
# intermediate weights
lowerCAmelCase = weights[2][0][1][2]
# Chunked Feed Forward
if len(lowerCamelCase ) == 4:
lowerCAmelCase = intermediate_weights[2]
# layernorm 2
lowerCAmelCase = np.asarray(intermediate_weights[0][0] )
lowerCAmelCase = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(lowerCamelCase ) , torch.tensor(lowerCamelCase ) , )
# intermediate dense
lowerCAmelCase = np.asarray(intermediate_weights[1][0] )
lowerCAmelCase = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(lowerCamelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase ) , )
# intermediate out
lowerCAmelCase = np.asarray(intermediate_weights[4][0] )
lowerCAmelCase = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(lowerCamelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase ) , )
def a_ ( lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Dict ):
# reformer model
lowerCAmelCase = torch_model.reformer
# word embeds
lowerCAmelCase = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCamelCase ) , )
if isinstance(weights[3] , lowerCamelCase ):
lowerCAmelCase = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
lowerCAmelCase = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f'''{position_embeddings.weights[emb_idx]} emb does not match'''
lowerCAmelCase = nn.Parameter(torch.tensor(lowerCamelCase ) )
lowerCAmelCase = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
lowerCamelCase ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
lowerCAmelCase = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# output layer norm
lowerCAmelCase = np.asarray(weights[7][0] )
lowerCAmelCase = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCamelCase ) , torch.tensor(lowerCamelCase ) , )
# output embeddings
lowerCAmelCase = np.asarray(weights[9][0] )
lowerCAmelCase = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(lowerCamelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase ) , )
def a_ ( lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : str ):
# Initialise PyTorch model
lowerCAmelCase = ReformerConfig.from_json_file(lowerCamelCase )
print(f'''Building PyTorch model from configuration: {config}''' )
lowerCAmelCase = ReformerModelWithLMHead(lowerCamelCase )
with open(lowerCamelCase , 'rb' ) as f:
lowerCAmelCase = pickle.load(lowerCamelCase )['weights']
set_model_weights_in_torch(lowerCamelCase , lowerCamelCase , config.hidden_size )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , lowerCamelCase )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__snake_case =parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 4 |
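The conversion functions above repeat one reshape pattern: a per-head Trax weight is transposed and flattened into the 2-D matrix a torch.nn.Linear expects. A standalone sketch with dummy shapes, not real Reformer weights:
import numpy as np
import torch

hidden_size, num_heads, head_dim = 6, 2, 4
trax_weight = np.random.randn(hidden_size, num_heads, head_dim).astype(np.float32)

# Same call chain as the set_param invocations above
linear_weight = torch.tensor(trax_weight).transpose(1, 2).contiguous().view(-1, hidden_size)
print(linear_weight.shape)  # torch.Size([8, 6]), i.e. (num_heads * head_dim, hidden_size)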
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__snake_case =logging.get_logger("""transformers.models.encodec""")
__snake_case ={
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
__snake_case ={
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
__snake_case ={
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
__snake_case ={
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
__snake_case ={
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
__snake_case ={
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__snake_case ={
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__snake_case =[]
__snake_case =[]
def a_ ( lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : List[str] ):
for attribute in key.split('.' ):
lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
if weight_type is not None:
lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase ).shape
else:
lowerCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowerCAmelCase = value
elif weight_type == "weight_g":
lowerCAmelCase = value
elif weight_type == "weight_v":
lowerCAmelCase = value
elif weight_type == "bias":
lowerCAmelCase = value
elif weight_type == "running_mean":
lowerCAmelCase = value
elif weight_type == "running_var":
lowerCAmelCase = value
elif weight_type == "num_batches_tracked":
lowerCAmelCase = value
elif weight_type == "weight_ih_l0":
lowerCAmelCase = value
elif weight_type == "weight_hh_l0":
lowerCAmelCase = value
elif weight_type == "bias_ih_l0":
lowerCAmelCase = value
elif weight_type == "bias_hh_l0":
lowerCAmelCase = value
elif weight_type == "weight_ih_l1":
lowerCAmelCase = value
elif weight_type == "weight_hh_l1":
lowerCAmelCase = value
elif weight_type == "bias_ih_l1":
lowerCAmelCase = value
elif weight_type == "bias_hh_l1":
lowerCAmelCase = value
else:
lowerCAmelCase = value
logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def a_ ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] ):
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
lowerCAmelCase , lowerCAmelCase = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : Any , lowerCamelCase : str ):
lowerCAmelCase = []
if model_name in ("encodec_24khz", "encodec_32khz"):
lowerCAmelCase = MAPPING_24K
elif model_name == "encodec_48khz":
lowerCAmelCase = MAPPING_48K
else:
raise ValueError(f'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(lowerCamelCase , lowerCamelCase ):
logger.info(f'''{name} was ignored''' )
continue
lowerCAmelCase = False
for key, mapped_key in MAPPING.items():
if "*" in key:
lowerCAmelCase , lowerCAmelCase = key.split('.*.' )
if prefix in name and suffix in name:
lowerCAmelCase = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
lowerCAmelCase = True
if "*" in mapped_key:
lowerCAmelCase = name.split(lowerCamelCase )[0].split('.' )[-2]
lowerCAmelCase = mapped_key.replace('*' , lowerCamelCase )
if "weight_g" in name:
lowerCAmelCase = 'weight_g'
elif "weight_v" in name:
lowerCAmelCase = 'weight_v'
elif "weight_ih_l0" in name:
lowerCAmelCase = 'weight_ih_l0'
elif "weight_hh_l0" in name:
lowerCAmelCase = 'weight_hh_l0'
elif "bias_ih_l0" in name:
lowerCAmelCase = 'bias_ih_l0'
elif "bias_hh_l0" in name:
lowerCAmelCase = 'bias_hh_l0'
elif "weight_ih_l1" in name:
lowerCAmelCase = 'weight_ih_l1'
elif "weight_hh_l1" in name:
lowerCAmelCase = 'weight_hh_l1'
elif "bias_ih_l1" in name:
lowerCAmelCase = 'bias_ih_l1'
elif "bias_hh_l1" in name:
lowerCAmelCase = 'bias_hh_l1'
elif "bias" in name:
lowerCAmelCase = 'bias'
elif "weight" in name:
lowerCAmelCase = 'weight'
elif "running_mean" in name:
lowerCAmelCase = 'running_mean'
elif "running_var" in name:
lowerCAmelCase = 'running_var'
elif "num_batches_tracked" in name:
lowerCAmelCase = 'num_batches_tracked'
else:
lowerCAmelCase = None
set_recursively(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
continue
if not is_used:
unused_weights.append(lowerCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def a_ ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : Dict=None , lowerCamelCase : Union[str, Any]=None , ):
if config_path is not None:
lowerCAmelCase = EncodecConfig.from_pretrained(lowerCamelCase )
else:
lowerCAmelCase = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
lowerCAmelCase = [8, 5, 4, 4]
lowerCAmelCase = [2.2]
lowerCAmelCase = 64
lowerCAmelCase = 32000
lowerCAmelCase = 2048
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
elif model_name == "encodec_48khz":
lowerCAmelCase = [8, 5, 4, 2]
lowerCAmelCase = [3.0, 6.0, 12.0, 24.0]
lowerCAmelCase = 48000
lowerCAmelCase = 2
lowerCAmelCase = False
lowerCAmelCase = 'time_group_norm'
lowerCAmelCase = True
lowerCAmelCase = 1.0
lowerCAmelCase = 0.01
else:
raise ValueError(f'''Unknown model name: {model_name}''' )
lowerCAmelCase = EncodecModel(lowerCamelCase )
lowerCAmelCase = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(lowerCamelCase )
lowerCAmelCase = torch.load(lowerCamelCase )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
lowerCAmelCase = original_checkpoint['best_state']
recursively_load_weights(lowerCamelCase , lowerCamelCase , lowerCamelCase )
model.save_pretrained(lowerCamelCase )
if repo_id:
print('Pushing to the hub...' )
feature_extractor.push_to_hub(lowerCamelCase )
model.push_to_hub(lowerCamelCase )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__snake_case =parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 4 | 1 |
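The should_ignore helper above supports three key shapes: a trailing ".*" prefix pattern, an infix ".*." pattern, and a plain substring. A self-contained sketch of the same matching logic with clean stand-in names:
def matches_wildcard(name: str, key: str) -> bool:
    if key.endswith(".*"):  # prefix pattern, e.g. "decoder.*"
        return name.startswith(key[:-1])
    if ".*." in key:  # infix pattern, e.g. "encoder.*.lstm"
        prefix, suffix = key.split(".*.")
        return prefix in name and suffix in name
    return key in name  # plain substring

print(matches_wildcard("encoder.model.13.lstm.bias", "encoder.*.lstm"))  # True
print(matches_wildcard("decoder.layers.0.conv.bias", "encoder.*.lstm"))  # False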
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
lowerCamelCase : str = LEDTokenizer
lowerCamelCase : Any = LEDTokenizerFast
lowerCamelCase : int = True
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
super().setUp()
lowerCAmelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
lowerCAmelCase = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
lowerCAmelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
lowerCAmelCase = {'unk_token': '<unk>'}
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCAmelCase__ ) )
def __UpperCAmelCase ( self : Dict , **UpperCAmelCase__ : Optional[Any] ) -> Dict:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] , **UpperCAmelCase__ : Optional[int] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : Any ) -> int:
return "lower newer", "lower newer"
@cached_property
def __UpperCAmelCase ( self : int ) -> List[Any]:
return LEDTokenizer.from_pretrained('allenai/led-base-16384' )
@cached_property
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' )
@require_torch
def __UpperCAmelCase ( self : Any ) -> Any:
lowerCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCAmelCase = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(UpperCAmelCase__ , max_length=len(UpperCAmelCase__ ) , padding=UpperCAmelCase__ , return_tensors='pt' )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowerCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
@require_torch
def __UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
lowerCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='pt' )
self.assertIn('input_ids' , UpperCAmelCase__ )
self.assertIn('attention_mask' , UpperCAmelCase__ )
self.assertNotIn('labels' , UpperCAmelCase__ )
self.assertNotIn('decoder_attention_mask' , UpperCAmelCase__ )
@require_torch
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
lowerCAmelCase = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(text_target=UpperCAmelCase__ , max_length=3_2 , padding='max_length' , return_tensors='pt' )
self.assertEqual(3_2 , targets['input_ids'].shape[1] )
@require_torch
def __UpperCAmelCase ( self : Any ) -> Any:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(
['I am a small frog' * 1_0_2_4, 'I am a small frog'] , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors='pt' )
self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2) )
@require_torch
def __UpperCAmelCase ( self : str ) -> List[Any]:
lowerCAmelCase = ['A long paragraph for summarization.']
lowerCAmelCase = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = tokenizer(UpperCAmelCase__ , return_tensors='pt' )
lowerCAmelCase = tokenizer(text_target=UpperCAmelCase__ , return_tensors='pt' )
lowerCAmelCase = inputs['input_ids']
lowerCAmelCase = targets['input_ids']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase = ['Summary of the text.', 'Another summary.']
lowerCAmelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowerCAmelCase = tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ )
lowerCAmelCase = [[0] * len(UpperCAmelCase__ ) for x in encoded_output['input_ids']]
lowerCAmelCase = tokenizer.pad(UpperCAmelCase__ )
self.assertSequenceEqual(outputs['global_attention_mask'] , UpperCAmelCase__ )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
pass
def __UpperCAmelCase ( self : str ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = 'A, <mask> AllenNLP sentence.'
lowerCAmelCase = tokenizer_r.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ )
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
UpperCAmelCase__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
UpperCAmelCase__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 4 |
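The last test above checks LED-specific padding: a user-supplied global_attention_mask is right-padded with -1 so padded positions stay distinguishable from real tokens. A hedged sketch, assuming transformers is installed and the 'allenai/led-base-16384' checkpoint can be downloaded:
from transformers import LEDTokenizer

tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")
batch = tok(["Summary of the text.", "Another summary."])
batch["global_attention_mask"] = [[0] * len(ids) for ids in batch["input_ids"]]
padded = tok.pad(batch)
print(padded["global_attention_mask"])  # the shorter row ends in -1 entries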
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__snake_case =logging.get_logger(__name__)
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : Dict , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : List[str] ) -> None:
warnings.warn(
'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use FlavaImageProcessor instead.' , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 4 | 1 |
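The snippet above is the standard deprecation shim: the old class name survives as a thin subclass that warns and delegates. A generic, self-contained sketch of the same pattern with hypothetical class names:
import warnings

class NewProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size

class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

OldFeatureExtractor()  # emits a FutureWarning, otherwise behaves like NewProcessor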
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Optional[int] = '''table-transformer'''
lowerCamelCase : List[str] = ['''past_key_values''']
lowerCamelCase : List[Any] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : List[str] , UpperCAmelCase__ : str=True , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : Tuple=1_0_0 , UpperCAmelCase__ : Any=6 , UpperCAmelCase__ : int=2_0_4_8 , UpperCAmelCase__ : List[str]=8 , UpperCAmelCase__ : Union[str, Any]=6 , UpperCAmelCase__ : Tuple=2_0_4_8 , UpperCAmelCase__ : int=8 , UpperCAmelCase__ : str=0.0 , UpperCAmelCase__ : str=0.0 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : List[Any]="relu" , UpperCAmelCase__ : Dict=2_5_6 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : str=0.0 , UpperCAmelCase__ : Optional[Any]=0.02 , UpperCAmelCase__ : Union[str, Any]=1.0 , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : List[Any]="sine" , UpperCAmelCase__ : Optional[int]="resnet50" , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : int=1 , UpperCAmelCase__ : Dict=5 , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : List[Any]=1 , UpperCAmelCase__ : Optional[int]=1 , UpperCAmelCase__ : Dict=5 , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : List[Any]=0.1 , **UpperCAmelCase__ : Union[str, Any] , ) -> Optional[int]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
lowerCAmelCase = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase = backbone_config.get('model_type' )
lowerCAmelCase = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase = config_class.from_dict(UpperCAmelCase__ )
# set timm attributes to None
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None, None, None
lowerCAmelCase = use_timm_backbone
lowerCAmelCase = backbone_config
lowerCAmelCase = num_channels
lowerCAmelCase = num_queries
lowerCAmelCase = d_model
lowerCAmelCase = encoder_ffn_dim
lowerCAmelCase = encoder_layers
lowerCAmelCase = encoder_attention_heads
lowerCAmelCase = decoder_ffn_dim
lowerCAmelCase = decoder_layers
lowerCAmelCase = decoder_attention_heads
lowerCAmelCase = dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = activation_dropout
lowerCAmelCase = activation_function
lowerCAmelCase = init_std
lowerCAmelCase = init_xavier_std
lowerCAmelCase = encoder_layerdrop
lowerCAmelCase = decoder_layerdrop
lowerCAmelCase = encoder_layers
lowerCAmelCase = auxiliary_loss
lowerCAmelCase = position_embedding_type
lowerCAmelCase = backbone
lowerCAmelCase = use_pretrained_backbone
lowerCAmelCase = dilation
# Hungarian matcher
lowerCAmelCase = class_cost
lowerCAmelCase = bbox_cost
lowerCAmelCase = giou_cost
# Loss coefficients
lowerCAmelCase = mask_loss_coefficient
lowerCAmelCase = dice_loss_coefficient
lowerCAmelCase = bbox_loss_coefficient
lowerCAmelCase = giou_loss_coefficient
lowerCAmelCase = eos_coefficient
super().__init__(is_encoder_decoder=UpperCAmelCase__ , **UpperCAmelCase__ )
@property
def __UpperCAmelCase ( self : int ) -> int:
return self.encoder_attention_heads
@property
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
return self.d_model
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Dict = version.parse('''1.11''' )
@property
def __UpperCAmelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def __UpperCAmelCase ( self : Optional[int] ) -> float:
return 1E-5
@property
def __UpperCAmelCase ( self : Optional[int] ) -> int:
return 1_2
| 4 |
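A short usage sketch for the configuration above, assuming transformers is installed; the attribute_map declared on the class resolves the generic names to the DETR-style ones:
from transformers import TableTransformerConfig

config = TableTransformerConfig(d_model=256, encoder_attention_heads=8)
print(config.hidden_size)          # 256, alias for d_model
print(config.num_attention_heads)  # 8, alias for encoder_attention_heads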
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
__snake_case ={
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
__snake_case ={
"""facebook/blenderbot_small-90M""": 512,
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Tuple = VOCAB_FILES_NAMES
lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = BlenderbotSmallTokenizer
def __init__( self : Any , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : int="<|endoftext|>" , UpperCAmelCase__ : Dict="<|endoftext|>" , UpperCAmelCase__ : str="<|endoftext|>" , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Tuple=True , **UpperCAmelCase__ : Optional[Any] , ) -> Any:
super().__init__(
ByteLevelBPETokenizer(
vocab=UpperCAmelCase__ , merges=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ , ) , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , **UpperCAmelCase__ , )
lowerCAmelCase = add_prefix_space
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict=None ) -> Any:
lowerCAmelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 4 | 1 |
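A standalone sketch of the special-token layout the first method above builds (upstream name build_inputs_with_special_tokens): [bos] + ids_a + [eos], plus an eos-joined second segment when present. Stand-in ids are used (bos=0, eos=2) so nothing needs downloading:
def build_inputs(token_ids_a, token_ids_b=None, bos_id=0, eos_id=2):
    output = [bos_id] + token_ids_a + [eos_id]
    if token_ids_b is None:
        return output
    return output + [eos_id] + token_ids_b + [eos_id]

print(build_inputs([10, 11]))        # [0, 10, 11, 2]
print(build_inputs([10, 11], [12]))  # [0, 10, 11, 2, 2, 12, 2]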
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__snake_case =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__snake_case =""" def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : Dict ) -> List[Any]:
lowerCAmelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , 'models/bert/' ) )
lowerCAmelCase = self.transformer_dir
shutil.copy(
os.path.join(UpperCAmelCase__ , 'src/transformers/models/bert/modeling_bert.py' ) , os.path.join(self.transformer_dir , 'models/bert/modeling_bert.py' ) , )
def __UpperCAmelCase ( self : int ) -> str:
lowerCAmelCase = 'src/transformers'
shutil.rmtree(self.transformer_dir )
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str]=None ) -> Union[str, Any]:
lowerCAmelCase = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
lowerCAmelCase = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
lowerCAmelCase = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=1_1_9 )
lowerCAmelCase = black.format_str(UpperCAmelCase__ , mode=UpperCAmelCase__ )
lowerCAmelCase = os.path.join(self.transformer_dir , 'new_code.py' )
with open(UpperCAmelCase__ , 'w' , newline='\n' ) as f:
f.write(UpperCAmelCase__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(UpperCAmelCase__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=UpperCAmelCase__ )
with open(UpperCAmelCase__ , 'r' ) as f:
self.assertTrue(f.read() , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
lowerCAmelCase = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead' )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Dict ) -> Any:
# Base copy consistency
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , UpperCAmelCase__ , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , re.sub('Bert' , 'TestModel' , UpperCAmelCase__ ) , )
# Copy consistency with a really long name
lowerCAmelCase = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub('Bert' , UpperCAmelCase__ , UpperCAmelCase__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , UpperCAmelCase__ , overwrite_result=re.sub('Bert' , 'TestModel' , UpperCAmelCase__ ) , )
def __UpperCAmelCase ( self : Any ) -> Any:
lowerCAmelCase = check_copies.LOCALIZED_READMES['README_zh-hans.md']
lowerCAmelCase = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
lowerCAmelCase = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
lowerCAmelCase = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md(
UpperCAmelCase__ , UpperCAmelCase__ , localized_readme['format_model_list'] )
self.assertFalse(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md(
UpperCAmelCase__ , UpperCAmelCase__ , localized_readme['format_model_list'] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(UpperCAmelCase__ )
lowerCAmelCase = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
lowerCAmelCase = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
lowerCAmelCase = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
lowerCAmelCase , lowerCAmelCase = check_copies.convert_to_localized_md(
UpperCAmelCase__ , UpperCAmelCase__ , localized_readme['format_model_list'] )
# Check if the model link is synchronized.
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 4 |
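The rename tests above hinge on a simple rule: a '# Copied from ... with Bert->TestModel' marker means the copy must equal the reference after that textual substitution is applied. A tiny self-contained illustration:
import re

reference = "class BertLMPredictionHead(nn.Module):\n    ...\n"
expected_copy = re.sub("Bert", "TestModel", reference)
print(expected_copy.splitlines()[0])  # class TestModelLMPredictionHead(nn.Module):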
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Union[str, Any] = '''speech_to_text_2'''
lowerCamelCase : Any = ['''past_key_values''']
lowerCamelCase : Optional[Any] = {'''num_attention_heads''': '''decoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[Any]=1_0_0_0_0 , UpperCAmelCase__ : int=6 , UpperCAmelCase__ : Optional[Any]=2_0_4_8 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str="relu" , UpperCAmelCase__ : Any=2_5_6 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : int=1_0_2_4 , **UpperCAmelCase__ : Optional[Any] , ) -> Dict:
lowerCAmelCase = vocab_size
lowerCAmelCase = d_model
lowerCAmelCase = decoder_ffn_dim
lowerCAmelCase = decoder_layers
lowerCAmelCase = decoder_attention_heads
lowerCAmelCase = dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = activation_dropout
lowerCAmelCase = activation_function
lowerCAmelCase = init_std
lowerCAmelCase = decoder_layerdrop
lowerCAmelCase = use_cache
lowerCAmelCase = decoder_layers
lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase = max_target_positions
super().__init__(
pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
| 4 | 1 |
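A hedged usage sketch: the inline comment above notes that embeddings are scaled by sqrt(d_model) when scale_embedding is True. Assuming transformers is installed:
import math
from transformers import Speech2Text2Config

config = Speech2Text2Config(d_model=256, scale_embedding=True)
embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
print(embed_scale)  # 16.0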
'''simple docstring'''
def solution(n: int = 1000) -> int:
    fib_prev, fib_curr = 1, 1
    index = 2
    while True:
        digit_count = 0
        fib_next = fib_prev + fib_curr
        fib_prev, fib_curr = fib_curr, fib_next
        index += 1
        for _ in str(fib_next):
            digit_count += 1
        if digit_count == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 4 |
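A standalone worked check of the logic above (Project Euler 25): walking the Fibonacci sequence, the first three-digit value, F(12) = 144, appears at index 12, so solution(3) returns 12.
prev, curr, index = 1, 1, 2
while len(str(curr)) < 3:
    prev, curr = curr, prev + curr
    index += 1
print(index, curr)  # 12 144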
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other: list) -> bool:
        return self[-1] < other[-1]

    def __eq__(self, other: object) -> bool:
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # use a heap-based merge to combine the stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
__snake_case =input("""Enter numbers separated by a comma:\n""").strip()
__snake_case =[int(item) for item in user_input.split(""",""")]
print(patience_sort(unsorted))
| 4 | 1 |
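A standalone illustration of the pile-forming step above: pile tops stay sorted, so bisect_left always finds the leftmost pile whose top is at least the new element.
from bisect import bisect_left

piles = []  # each pile's last element is its top
for element in [1, 9, 5, 21, 17, 6]:
    tops = [pile[-1] for pile in piles]
    i = bisect_left(tops, element)
    if i != len(piles):
        piles[i].append(element)  # leftmost pile with top >= element
    else:
        piles.append([element])  # no such pile: start a new one
print(piles)  # [[1], [9, 5], [21, 17, 6]]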
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
__snake_case ="""scheduler_config.json"""
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Optional[int] = 1
lowerCamelCase : List[str] = 2
lowerCamelCase : str = 3
lowerCamelCase : Any = 4
lowerCamelCase : Dict = 5
@dataclass
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : jnp.ndarray
class UpperCAmelCase_ :
lowerCamelCase : Any = SCHEDULER_CONFIG_NAME
lowerCamelCase : str = ['''dtype''']
lowerCamelCase : int = []
lowerCamelCase : Dict = True
@classmethod
def __UpperCAmelCase ( cls : Union[str, Any] , UpperCAmelCase__ : Dict[str, Any] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : int=False , **UpperCAmelCase__ : str , ) -> List[Any]:
lowerCAmelCase , lowerCAmelCase = cls.load_config(
pretrained_model_name_or_path=UpperCAmelCase__ , subfolder=UpperCAmelCase__ , return_unused_kwargs=UpperCAmelCase__ , **UpperCAmelCase__ , )
lowerCAmelCase , lowerCAmelCase = cls.from_config(UpperCAmelCase__ , return_unused_kwargs=UpperCAmelCase__ , **UpperCAmelCase__ )
if hasattr(UpperCAmelCase__ , 'create_state' ) and getattr(UpperCAmelCase__ , 'has_state' , UpperCAmelCase__ ):
lowerCAmelCase = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : Union[str, os.PathLike] , UpperCAmelCase__ : bool = False , **UpperCAmelCase__ : Optional[int] ) -> List[Any]:
self.save_config(save_directory=UpperCAmelCase__ , push_to_hub=UpperCAmelCase__ , **UpperCAmelCase__ )
@property
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
return self._get_compatibles()
@classmethod
def __UpperCAmelCase ( cls : List[str] ) -> List[str]:
lowerCAmelCase = list(set([cls.__name__] + cls._compatibles ) )
lowerCAmelCase = importlib.import_module(__name__.split('.' )[0] )
lowerCAmelCase = [
getattr(UpperCAmelCase__ , UpperCAmelCase__ ) for c in compatible_classes_str if hasattr(UpperCAmelCase__ , UpperCAmelCase__ )
]
return compatible_classes
def a_ ( lowerCamelCase : jnp.ndarray , lowerCamelCase : Tuple[int] ):
assert len(lowerCamelCase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(lowerCamelCase ) - x.ndim) ) , lowerCamelCase )
def a_ ( lowerCamelCase : int , lowerCamelCase : Dict=0.999 , lowerCamelCase : Optional[Any]=jnp.floataa ):
def alpha_bar(lowerCamelCase : Dict ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
lowerCAmelCase = []
for i in range(lowerCamelCase ):
lowerCAmelCase = i / num_diffusion_timesteps
lowerCAmelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(lowerCamelCase ) / alpha_bar(lowerCamelCase ) , lowerCamelCase ) )
return jnp.array(lowerCamelCase , dtype=lowerCamelCase )
@flax.struct.dataclass
class UpperCAmelCase_ :
lowerCamelCase : jnp.ndarray
lowerCamelCase : jnp.ndarray
lowerCamelCase : jnp.ndarray
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , UpperCAmelCase__ : Tuple ) -> List[str]:
lowerCAmelCase = scheduler.config
if config.trained_betas is not None:
lowerCAmelCase = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
lowerCAmelCase = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCAmelCase = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCAmelCase = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' )
lowerCAmelCase = 1.0 - betas
lowerCAmelCase = jnp.cumprod(UpperCAmelCase__ , axis=0 )
return cls(
alphas=UpperCAmelCase__ , betas=UpperCAmelCase__ , alphas_cumprod=UpperCAmelCase__ , )
def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
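

if __name__ == "__main__":
    # Illustrative sketch of how the helpers above compose (step count, shapes and the
    # hand-built state are invented for the demo, not taken from any real scheduler):
    betas = betas_for_alpha_bar(10)  # 10-step squaredcos_cap_v2 schedule
    alphas = 1.0 - betas
    state = CommonSchedulerState(alphas=alphas, betas=betas, alphas_cumprod=jnp.cumprod(alphas, axis=0))

    sample = jnp.ones((1, 4, 8, 8))      # stand-in for a latent batch
    noise = jnp.ones_like(sample) * 0.5  # stand-in for Gaussian noise
    timesteps = jnp.array([5])           # one timestep per batch element
    noisy = add_noise_common(state, sample, noise, timesteps)
    print(noisy.shape)  # (1, 4, 8, 8); broadcast_to_shape_from_left aligns the per-step scalars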
| 4 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__snake_case ="""\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__snake_case ="""\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__snake_case ="""
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
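

if __name__ == "__main__":
    # Since the scorer itself is the vendored compute_bleu, it can be exercised
    # directly (illustrative sketch; data mirrors the docstring example above):
    predictions = [["hello", "there", "general", "kenobi"]]
    references = [[["hello", "there", "general", "kenobi"], ["hello", "there", "!"]]]
    bleu, precisions, bp, ratio, translation_length, reference_length = compute_bleu(
        reference_corpus=references, translation_corpus=predictions, max_order=4, smooth=False
    )
    print(bleu)  # 1.0: the prediction exactly matches one of its references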
| 4 | 1 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
# Initialize image_processing
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCAmelCase , lowerCAmelCase = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
lowerCAmelCase , lowerCAmelCase = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch(self):
# Initialize image_processing
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCAmelCase , lowerCAmelCase = self.image_processor_tester.get_expected_values(UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase = image_processing(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
lowerCAmelCase , lowerCAmelCase = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
# prepare image and target
lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
lowerCAmelCase = json.loads(f.read() )
lowerCAmelCase = {'image_id': 3_9_7_6_9, 'annotations': target}
# encode them
lowerCAmelCase = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
lowerCAmelCase = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , return_tensors='pt' )
# verify pixel values
lowerCAmelCase = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , UpperCAmelCase__ )
lowerCAmelCase = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
# verify area
lowerCAmelCase = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , UpperCAmelCase__ ) )
# verify boxes
lowerCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , UpperCAmelCase__ )
lowerCAmelCase = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , UpperCAmelCase__ , atol=1E-3 ) )
# verify image_id
lowerCAmelCase = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , UpperCAmelCase__ ) )
# verify is_crowd
lowerCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , UpperCAmelCase__ ) )
# verify class_labels
lowerCAmelCase = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , UpperCAmelCase__ ) )
# verify orig_size
lowerCAmelCase = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , UpperCAmelCase__ ) )
# verify size
lowerCAmelCase = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , UpperCAmelCase__ ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
# prepare image, target and masks_path
lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
lowerCAmelCase = json.loads(f.read() )
lowerCAmelCase = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
lowerCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
lowerCAmelCase = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
lowerCAmelCase = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , masks_path=UpperCAmelCase__ , return_tensors='pt' )
# verify pixel values
lowerCAmelCase = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , UpperCAmelCase__ )
lowerCAmelCase = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
# verify area
lowerCAmelCase = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , UpperCAmelCase__ ) )
# verify boxes
lowerCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , UpperCAmelCase__ )
lowerCAmelCase = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , UpperCAmelCase__ , atol=1E-3 ) )
# verify image_id
lowerCAmelCase = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , UpperCAmelCase__ ) )
# verify is_crowd
lowerCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , UpperCAmelCase__ ) )
# verify class_labels
lowerCAmelCase = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , UpperCAmelCase__ ) )
# verify masks
lowerCAmelCase = 8_2_2_8_7_3
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , UpperCAmelCase__ )
# verify orig_size
lowerCAmelCase = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , UpperCAmelCase__ ) )
# verify size
lowerCAmelCase = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , UpperCAmelCase__ ) )
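

if __name__ == "__main__":
    # Standalone sketch of the processor exercised above (the dummy image is
    # illustrative; the expected shape follows the assertions in the slow tests):
    image_processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
    image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
    encoding = image_processor(images=image, return_tensors="pt")
    print(encoding["pixel_values"].shape)  # torch.Size([1, 3, 800, 1066])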
| 4 |
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__snake_case ="""\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
__snake_case ="""\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
__snake_case ="""
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds, labels):
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)

    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 4 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_torchscript(self):
lowerCAmelCase = 'sshleifer/tiny-gpt2'
lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , torchscript=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
lowerCAmelCase = PyTorchBenchmark(UpperCAmelCase__ )
lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
    def test_inference_fp16(self):
lowerCAmelCase = 'sshleifer/tiny-gpt2'
lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , fpaa=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
lowerCAmelCase = PyTorchBenchmark(UpperCAmelCase__ )
lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_model_no_architectures(self):
lowerCAmelCase = 'sshleifer/tiny-gpt2'
lowerCAmelCase = AutoConfig.from_pretrained(UpperCAmelCase__ )
# set architectures equal to `None`
lowerCAmelCase = None
lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
lowerCAmelCase = PyTorchBenchmark(UpperCAmelCase__ , configs=[config] )
lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    def test_train_no_configs(self):
lowerCAmelCase = 'sshleifer/tiny-gpt2'
lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
lowerCAmelCase = PyTorchBenchmark(UpperCAmelCase__ )
lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
    def test_train_no_configs_fp16(self):
lowerCAmelCase = 'sshleifer/tiny-gpt2'
lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=UpperCAmelCase__ , multi_process=UpperCAmelCase__ , )
lowerCAmelCase = PyTorchBenchmark(UpperCAmelCase__ )
lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
    def test_inference_with_configs(self):
lowerCAmelCase = 'sshleifer/tiny-gpt2'
lowerCAmelCase = AutoConfig.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
lowerCAmelCase = PyTorchBenchmark(UpperCAmelCase__ , configs=[config] )
lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_encoder_decoder_with_configs(self):
lowerCAmelCase = 'sshleifer/tinier_bart'
lowerCAmelCase = AutoConfig.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
lowerCAmelCase = PyTorchBenchmark(UpperCAmelCase__ , configs=[config] )
lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    def test_train_with_configs(self):
lowerCAmelCase = 'sshleifer/tiny-gpt2'
lowerCAmelCase = AutoConfig.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
lowerCAmelCase = PyTorchBenchmark(UpperCAmelCase__ , configs=[config] )
lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
    def test_train_encoder_decoder_with_configs(self):
lowerCAmelCase = 'sshleifer/tinier_bart'
lowerCAmelCase = AutoConfig.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase__ , )
lowerCAmelCase = PyTorchBenchmark(UpperCAmelCase__ , configs=[config] )
lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
    def test_save_csv_files(self):
lowerCAmelCase = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , save_to_csv=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(UpperCAmelCase__ , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(UpperCAmelCase__ , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(UpperCAmelCase__ , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(UpperCAmelCase__ , 'train_time.csv' ) , env_info_csv_file=os.path.join(UpperCAmelCase__ , 'env.csv' ) , multi_process=UpperCAmelCase__ , )
lowerCAmelCase = PyTorchBenchmark(UpperCAmelCase__ )
benchmark.run()
self.assertTrue(Path(os.path.join(UpperCAmelCase__ , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCAmelCase__ , 'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCAmelCase__ , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCAmelCase__ , 'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCAmelCase__ , 'env.csv' ) ).exists() )
    def test_trace_memory(self):
lowerCAmelCase = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(UpperCAmelCase__ : str ):
self.assertTrue(hasattr(UpperCAmelCase__ , 'sequential' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'cumulative' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'current' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase__ , inference=UpperCAmelCase__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCAmelCase__ , 'log.txt' ) , log_print=UpperCAmelCase__ , trace_memory_line_by_line=UpperCAmelCase__ , multi_process=UpperCAmelCase__ , )
lowerCAmelCase = PyTorchBenchmark(UpperCAmelCase__ )
lowerCAmelCase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(UpperCAmelCase__ , 'log.txt' ) ).exists() )
| 4 |
'''simple docstring'''
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
| 4 | 1 |
'''simple docstring'''
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
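

if __name__ == "__main__":
    # Illustrative checks (not part of the original file; values follow from is_prime):
    assert twin_prime(3) == 5    # (3, 5) are twin primes
    assert twin_prime(4) == -1   # 4 is not prime
    assert twin_prime(5) == 7    # (5, 7) are twin primes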
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 |
'''simple docstring'''
import os
__snake_case ={"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1_000}
def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    numerals = ""

    m_count = num // 1000
numerals += m_count * "M"
num %= 1000
    c_count = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
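

if __name__ == "__main__":
    # Round-trip sketch: a wasteful numeral is parsed and re-emitted in minimal form.
    value = parse_roman_numerals("XXXXVIIII")        # 49, written with 9 characters
    assert generate_roman_numerals(value) == "XLIX"  # the 4-character minimal form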
def a_ ( lowerCamelCase : str = "/p089_roman.txt" ):
lowerCAmelCase = 0
with open(os.path.dirname(lowerCamelCase ) + roman_numerals_filename ) as filea:
lowerCAmelCase = filea.readlines()
for line in lines:
lowerCAmelCase = line.strip()
lowerCAmelCase = parse_roman_numerals(lowerCamelCase )
lowerCAmelCase = generate_roman_numerals(lowerCamelCase )
savings += len(lowerCamelCase ) - len(lowerCamelCase )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 4 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
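

if __name__ == "__main__":
    # Illustrative only (this guard never fires during normal package import):
    # with the lazy module installed above, heavy submodules load on first access.
    import transformers

    config = transformers.MegaConfig()  # attribute access triggers the real import
    print(type(config).__name__)        # "MegaConfig"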
| 4 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as nested Python lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)
    def test_call(self):
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase = [np.asarray(UpperCAmelCase__ ) for speech_input in speech_inputs]
# Test not batched input
lowerCAmelCase = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test batched
lowerCAmelCase = feat_extract(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
lowerCAmelCase = np.asarray(UpperCAmelCase__ )
lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
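

if __name__ == "__main__":
    # Minimal standalone sketch of the extractor exercised above (the silent
    # waveform is illustrative; the shape follows the integration assertion):
    waveform = np.zeros(16000, dtype=np.float32)  # one second of 16 kHz audio
    feature_extractor = ASTFeatureExtractor()
    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="np")
    print(inputs["input_values"].shape)  # (1, 1024, 128): 1024 frames x 128 mel bins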
| 4 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = OwlViTProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase__ )
lowerCAmelCase = OwlViTProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase__ )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase__ )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase__ )
    def test_save_load_pretrained_additional_features(self):
lowerCAmelCase = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCAmelCase = self.get_image_processor(do_normalize=UpperCAmelCase__ )
lowerCAmelCase = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase__ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase__ )
    def test_image_processor(self):
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = OwlViTProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = image_processor(UpperCAmelCase__ , return_tensors='np' )
lowerCAmelCase = processor(images=UpperCAmelCase__ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer(self):
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = OwlViTProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
lowerCAmelCase = 'lower newer'
lowerCAmelCase = processor(text=UpperCAmelCase__ , return_tensors='np' )
lowerCAmelCase = tokenizer(UpperCAmelCase__ , return_tensors='np' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
    def test_processor(self):
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = OwlViTProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
lowerCAmelCase = 'lower newer'
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=UpperCAmelCase__ , images=UpperCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase__ ):
processor()
    def test_processor_with_text_list(self):
lowerCAmelCase = 'google/owlvit-base-patch32'
lowerCAmelCase = OwlViTProcessor.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase = ['cat', 'nasa badge']
lowerCAmelCase = processor(text=UpperCAmelCase__ )
lowerCAmelCase = 1_6
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase__ ):
processor()
    def test_processor_with_nested_text_list(self):
lowerCAmelCase = 'google/owlvit-base-patch32'
lowerCAmelCase = OwlViTProcessor.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase = [['cat', 'nasa badge'], ['person']]
lowerCAmelCase = processor(text=UpperCAmelCase__ )
lowerCAmelCase = 1_6
lowerCAmelCase = len(UpperCAmelCase__ )
lowerCAmelCase = max([len(UpperCAmelCase__ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase__ ):
processor()
    def test_processor_case(self):
lowerCAmelCase = 'google/owlvit-base-patch32'
lowerCAmelCase = OwlViTProcessor.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase = ['cat', 'nasa badge']
lowerCAmelCase = processor(text=UpperCAmelCase__ )
lowerCAmelCase = 1_6
lowerCAmelCase = inputs['input_ids']
lowerCAmelCase = [
[4_9_4_0_6, 2_3_6_8, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9_4_0_6, 6_8_4_1, 1_1_3_0_1, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
    def test_processor_case2(self):
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = OwlViTProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(images=UpperCAmelCase__ , query_images=UpperCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['query_pixel_values', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase__ ):
processor()
    def test_tokenizer_decode(self):
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = OwlViTProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase = processor.batch_decode(UpperCAmelCase__ )
lowerCAmelCase = tokenizer.batch_decode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
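

if __name__ == "__main__":
    # Standalone sketch of the processor round trip tested above (dummy image is
    # illustrative; key set follows the assertions in test_processor):
    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
    inputs = processor(text=[["cat", "nasa badge"]], images=image, return_tensors="np")
    print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']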
| 4 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class UpperCAmelCase_ ( unittest.TestCase ):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
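

if __name__ == "__main__":
    # The same round trip outside unittest (CPU-only illustrative sketch):
    model = torch.nn.Linear(10, 10)
    optimizer = torch.optim.SGD(model.parameters(), 0.1)
    accelerator = Accelerator()
    optimizer = accelerator.prepare(optimizer)
    restored = pickle.loads(pickle.dumps(optimizer))
    print(type(restored).__name__)  # AcceleratedOptimizer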
| 4 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 4 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token( self : Union[str, Any] ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token( self : int , UpperCAmelCase__ : Optional[Any] ) -> Tuple:
lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else value
lowerCAmelCase = value
def __UpperCAmelCase ( self : Optional[Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : List[str] ) -> BatchEncoding:
lowerCAmelCase = kwargs.get('is_split_into_words' , UpperCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[str] , *UpperCAmelCase__ : str , **UpperCAmelCase__ : List[str] ) -> BatchEncoding:
lowerCAmelCase = kwargs.get('is_split_into_words' , UpperCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
lowerCAmelCase = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> Any:
return token_ids_a + [self.eos_token_id]
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : "Conversation" ) -> List[int]:
lowerCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix a space, as is done inside Blenderbot itself
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(UpperCAmelCase__ )
lowerCAmelCase = ' '.join(UpperCAmelCase__ )
lowerCAmelCase = self.encode(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > self.model_max_length:
lowerCAmelCase = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 4 | 1 |
'''simple docstring'''
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : torch.FloatTensor
lowerCamelCase : Optional[torch.FloatTensor] = None
def betas_for_alpha_bar( lowerCamelCase : Any , lowerCamelCase : List[Any]=0.999 , lowerCamelCase : Union[str, Any]="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCamelCase : Optional[int] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCamelCase : Tuple ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
lowerCAmelCase = []
for i in range(lowerCamelCase ):
lowerCAmelCase = i / num_diffusion_timesteps
lowerCAmelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowerCamelCase ) / alpha_bar_fn(lowerCamelCase ) , lowerCamelCase ) )
return torch.tensor(lowerCamelCase , dtype=torch.floataa )
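# Illustrative usage sketch (an addition, not part of the original file): ten
# steps of the cosine alpha-bar schedule yield ten betas, each capped at
# max_beta=0.999 and growing toward the end of the diffusion trajectory.
# betas = betas_for_alpha_bar(10)
# assert betas.shape == (10,)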
class UpperCAmelCase_ ( __lowercase , __lowercase ):
lowerCamelCase : Union[str, Any] = 1
@register_to_config
def __init__( self : List[Any] , UpperCAmelCase__ : int = 1_0_0_0 , UpperCAmelCase__ : float = 0.0_001 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : str = "linear" , UpperCAmelCase__ : Optional[Union[np.ndarray, List[float]]] = None , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : str = "epsilon" , UpperCAmelCase__ : float = 1.0 , **UpperCAmelCase__ : str , ) -> Any:
if kwargs.get('set_alpha_to_one' , UpperCAmelCase__ ) is not None:
lowerCAmelCase = (
'The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'
)
deprecate('set_alpha_to_one' , '1.0.0' , UpperCAmelCase__ , standard_warn=UpperCAmelCase__ )
lowerCAmelCase = kwargs['set_alpha_to_one']
if trained_betas is not None:
lowerCAmelCase = torch.tensor(UpperCAmelCase__ , dtype=torch.floataa )
elif beta_schedule == "linear":
lowerCAmelCase = torch.linspace(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCAmelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCAmelCase__ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCAmelCase = betas_for_alpha_bar(UpperCAmelCase__ )
else:
            raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
lowerCAmelCase = 1.0 - self.betas
lowerCAmelCase = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just outputs the predicted noise
# or whether we use the final alpha of the "non-previous" one.
lowerCAmelCase = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
lowerCAmelCase = 1.0
# setable values
lowerCAmelCase = None
lowerCAmelCase = torch.from_numpy(np.arange(0 , UpperCAmelCase__ ).copy().astype(np.intaa ) )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : torch.FloatTensor , UpperCAmelCase__ : Optional[int] = None ) -> torch.FloatTensor:
return sample
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, torch.device] = None ) -> Dict:
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
F'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
F''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
F''' maximal {self.config.num_train_timesteps} timesteps.''' )
lowerCAmelCase = num_inference_steps
lowerCAmelCase = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCAmelCase = (np.arange(0 , UpperCAmelCase__ ) * step_ratio).round().copy().astype(np.intaa )
lowerCAmelCase = torch.from_numpy(UpperCAmelCase__ ).to(UpperCAmelCase__ )
self.timesteps += self.config.steps_offset
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : torch.FloatTensor , UpperCAmelCase__ : int , UpperCAmelCase__ : torch.FloatTensor , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
# 1. get previous step value (=t+1)
lowerCAmelCase = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
lowerCAmelCase = self.alphas_cumprod[timestep]
lowerCAmelCase = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
lowerCAmelCase = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
lowerCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
lowerCAmelCase = model_output
elif self.config.prediction_type == "sample":
lowerCAmelCase = model_output
lowerCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
lowerCAmelCase = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
' `v_prediction`' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
lowerCAmelCase = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=UpperCAmelCase__ , pred_original_sample=UpperCAmelCase__ )
def __len__( self : Any ) -> Optional[Any]:
return self.config.num_train_timesteps
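# Hedged usage sketch (an illustration, not from the original source): the
# upstream diffusers class is DDIMInverseScheduler, and the two hooks used
# below are its set_timesteps/step methods, which appear above under
# obfuscated names; `unet` is a hypothetical noise-prediction model.
# scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
# scheduler.set_timesteps(50)
# for t in scheduler.timesteps:
#     noise_pred = unet(latents, t).sample
#     latents = scheduler.step(noise_pred, t, latents).prev_sample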
| 4 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes remain, every process whose arrival time has passed and
    # which still has remaining execution time is put into ready_process; the
    # shortest job in ready_process (target_process) is executed next.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
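# Worked example (added for illustration): with arrival times [0, 0, 0, 0] and
# burst times [2, 5, 3, 7], the shortest remaining job always runs next, so
# completion times are 2, 5, 10, 17; the waiting times come out as
# [0, 5, 2, 10] (mean 4.25) and the turnaround times as [2, 10, 5, 17]
# (mean 8.5), matching burst + wait per process.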
if __name__ == "__main__":
print("""[TEST CASE 01]""")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
F'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(F'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(F'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 4 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCAmelCase_ ( __lowercase ):
@staticmethod
@abstractmethod
def __UpperCAmelCase ( UpperCAmelCase__ : ArgumentParser ) -> str:
raise NotImplementedError()
@abstractmethod
def __UpperCAmelCase ( self : List[Any] ) -> str:
raise NotImplementedError()
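# Minimal illustrative subclass (an assumption, not part of the original file):
# concrete CLI commands implement the static hook to wire their sub-parser and
# the instance hook to do the work; upstream these are named
# register_subcommand and run.
# class HelloCommand(UpperCAmelCase_):
#     @staticmethod
#     def register_subcommand(parser):
#         ...  # add flags / a sub-parser here
#     def run(self):
#         print('hello')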
| 4 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : Optional[int] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Tuple ) -> Any:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_euler' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self : List[str] ) -> Dict:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_euler' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_dpmpp_2m' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=1_5 , output_type='np' , use_karras_sigmas=UpperCAmelCase__ , )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array(
[0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 4 | 1 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha
    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha
    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
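# Illustrative usage (not part of the original file): a +6 dB high-shelf at
# 1 kHz for 48 kHz audio, applied sample by sample; IIRFilter's per-sample
# entry point is assumed to be `process`.
# filt = make_highshelf(1000, 48000, 6.0)
# filtered = [filt.process(sample) for sample in samples]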
| 4 |
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print( lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any]=0 ):
# Format the message.
if name is None:
lowerCAmelCase = None
else:
lowerCAmelCase = '.' * max(0 , spaces - 2 ) + '# {:' + str(50 - spaces ) + 's}'
lowerCAmelCase = fmt.format(lowerCamelCase )
# Print and recurse (if needed).
if isinstance(lowerCamelCase , lowerCamelCase ):
if msg is not None:
print(lowerCamelCase )
for k in val.keys():
recursive_print(lowerCamelCase , val[k] , spaces + 2 )
elif isinstance(lowerCamelCase , torch.Tensor ):
print(lowerCamelCase , ':' , val.size() )
else:
print(lowerCamelCase , ':' , lowerCamelCase )
def fix_query_key_value_ordering( lowerCamelCase : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Dict , lowerCamelCase : Tuple ):
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
lowerCAmelCase = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
lowerCAmelCase = (num_heads, hidden_size, num_splits) + input_shape[1:]
lowerCAmelCase = param.view(*lowerCamelCase )
lowerCAmelCase = param.transpose(0 , 2 )
lowerCAmelCase = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
lowerCAmelCase = (num_heads, num_splits, hidden_size) + input_shape[1:]
lowerCAmelCase = param.view(*lowerCamelCase )
lowerCAmelCase = param.transpose(0 , 1 ).contiguous()
lowerCAmelCase = param.view(*lowerCamelCase )
return param
def convert_megatron_checkpoint( lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : str ):
# The converted output model.
lowerCAmelCase = {}
# old versions did not store training args
lowerCAmelCase = input_state_dict.get('args' , lowerCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
lowerCAmelCase = ds_args.padded_vocab_size
lowerCAmelCase = ds_args.max_position_embeddings
lowerCAmelCase = ds_args.hidden_size
lowerCAmelCase = ds_args.num_layers
lowerCAmelCase = ds_args.num_attention_heads
lowerCAmelCase = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
lowerCAmelCase = config.n_head
# The hidden_size per head.
lowerCAmelCase = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
lowerCAmelCase = input_state_dict['checkpoint_version']
else:
lowerCAmelCase = 0.0
# The model.
lowerCAmelCase = input_state_dict['model']
# The language model.
lowerCAmelCase = model['language_model']
# The embeddings.
lowerCAmelCase = lm['embedding']
# The word embeddings.
lowerCAmelCase = embeddings['word_embeddings']['weight']
# Truncate the embedding table to vocab_size rows.
lowerCAmelCase = word_embeddings[: config.vocab_size, :]
lowerCAmelCase = word_embeddings
# The position embeddings.
lowerCAmelCase = embeddings['position_embeddings']['weight']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
lowerCAmelCase = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
lowerCAmelCase = pos_embeddings
# The transformer.
lowerCAmelCase = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder']
# The regex to extract layer names.
lowerCAmelCase = re.compile(R'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' )
# The simple map of names for "automated" rules.
lowerCAmelCase = {
'attention.dense': '.attn.c_proj.',
'self_attention.dense': '.attn.c_proj.',
'mlp.dense_h_to_4h': '.mlp.c_fc.',
'mlp.dense_4h_to_h': '.mlp.c_proj.',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
lowerCAmelCase = layer_re.match(lowerCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
lowerCAmelCase = int(m.group(1 ) )
# The name of the operation.
lowerCAmelCase = m.group(2 )
# Is it a weight or a bias?
lowerCAmelCase = m.group(3 )
# The name of the layer.
lowerCAmelCase = f'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith('layernorm' ):
lowerCAmelCase = 'ln_1' if op_name.startswith('input' ) else 'ln_2'
lowerCAmelCase = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
lowerCAmelCase = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , lowerCamelCase , lowerCamelCase )
lowerCAmelCase = causal_mask
# Insert a "dummy" tensor for masked_bias.
lowerCAmelCase = torch.tensor(-1e4 , dtype=torch.floataa )
lowerCAmelCase = masked_bias
lowerCAmelCase = fix_query_key_value_ordering(lowerCamelCase , lowerCamelCase , 3 , lowerCamelCase , lowerCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
lowerCAmelCase = out_val.transpose(0 , 1 ).contiguous()
# Store.
lowerCAmelCase = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
lowerCAmelCase = fix_query_key_value_ordering(lowerCamelCase , lowerCamelCase , 3 , lowerCamelCase , lowerCamelCase )
# Store. No change of shape.
lowerCAmelCase = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
lowerCAmelCase = megatron_to_transformers[op_name]
lowerCAmelCase = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
lowerCAmelCase = megatron_to_transformers[op_name]
lowerCAmelCase = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
lowerCAmelCase = transformer['final_layernorm.weight']
lowerCAmelCase = transformer['final_layernorm.bias']
    # For the LM head, transformers wants the matrix tied to the word embeddings.
lowerCAmelCase = word_embeddings
# It should be done!
return output_state_dict
def main( ):
# Create the argument parser.
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('--print-checkpoint-structure' , action='store_true' )
parser.add_argument(
'path_to_checkpoint' , type=lowerCamelCase , help='Path to the checkpoint file (.zip archive or direct .pt file)' , )
parser.add_argument(
'--config_file' , default='' , type=lowerCamelCase , help='An optional config json file describing the pre-trained model.' , )
lowerCAmelCase = parser.parse_args()
# Extract the basename.
lowerCAmelCase = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith('.zip' ):
with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint:
with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict:
lowerCAmelCase = torch.load(lowerCamelCase , map_location='cpu' )
else:
lowerCAmelCase = torch.load(args.path_to_checkpoint , map_location='cpu' )
lowerCAmelCase = input_state_dict.get('args' , lowerCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
lowerCAmelCase = 'gelu_fast'
elif ds_args.openai_gelu:
lowerCAmelCase = 'gelu_new'
else:
lowerCAmelCase = 'gelu'
else:
# in the very early days this used to be "gelu_new"
lowerCAmelCase = 'gelu_new'
# Spell out all parameters in case the defaults change.
lowerCAmelCase = GPTaConfig(
vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=lowerCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='cls_index' , summary_use_proj=lowerCamelCase , summary_activation=lowerCamelCase , summary_proj_to_labels=lowerCamelCase , summary_first_dropout=0.1 , scale_attn_weights=lowerCamelCase , use_cache=lowerCamelCase , bos_token_id=50256 , eos_token_id=50256 , )
else:
lowerCAmelCase = GPTaConfig.from_json_file(args.config_file )
lowerCAmelCase = ['GPT2LMHeadModel']
# Convert.
print('Converting' )
lowerCAmelCase = convert_megatron_checkpoint(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(lowerCamelCase , lowerCamelCase )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
lowerCAmelCase = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
lowerCAmelCase = 'gpt2'
elif tokenizer_type == "PretrainedFromHF":
lowerCAmelCase = ds_args.tokenizer_name_or_path
else:
raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
lowerCAmelCase = 'gpt2'
lowerCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase )
lowerCAmelCase = type(lowerCamelCase ).__name__
lowerCAmelCase = tokenizer_class
# Store the config to file.
print('Saving config' )
config.save_pretrained(lowerCamelCase )
# Save tokenizer based on args
print(f'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(lowerCamelCase )
# Store the state_dict to file.
lowerCAmelCase = os.path.join(lowerCamelCase , 'pytorch_model.bin' )
print(f'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(lowerCamelCase , lowerCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
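# Example invocation (illustrative; the checkpoint path is a placeholder):
#   python convert_megatron_gpt2_checkpoint.py --print-checkpoint-structure \
#       /path/to/megatron/checkpoint.zip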
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__snake_case =numpy.array([0, 0])
__snake_case =numpy.array([0.5, 0.8_6_6_0_2_5_4])
__snake_case =numpy.array([1, 0])
__snake_case =[VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate( lowerCamelCase : list[numpy.ndarray] , lowerCamelCase : int ):
lowerCAmelCase = initial_vectors
for _ in range(lowerCamelCase ):
lowerCAmelCase = iteration_step(lowerCamelCase )
return vectors
def iteration_step( vectors : list[numpy.ndarray] ):
lowerCAmelCase = []
for i, start_vector in enumerate(vectors[:-1] ):
lowerCAmelCase = vectors[i + 1]
        new_vectors.append(start_vector )
lowerCAmelCase = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
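# Note (added for clarity): each call replaces every segment with four shorter
# ones, so n applications to the initial triangle yield 3 * 4**n segments,
# i.e. 3 * 4**n + 1 points in the returned list.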
def rotate( lowerCamelCase : numpy.ndarray , lowerCamelCase : float ):
lowerCAmelCase = numpy.radians(lowerCamelCase )
lowerCAmelCase , lowerCAmelCase = numpy.cos(lowerCamelCase ), numpy.sin(lowerCamelCase )
lowerCAmelCase = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowerCamelCase , lowerCamelCase )
def plot( lowerCamelCase : list[numpy.ndarray] ):
lowerCAmelCase = plt.gca()
axes.set_aspect('equal' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
lowerCAmelCase , lowerCAmelCase = zip(*lowerCamelCase )
plt.plot(lowerCamelCase , lowerCamelCase )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 4 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class UpperCAmelCase_ :
def __init__( self : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 0 ) -> None:
lowerCAmelCase , lowerCAmelCase = row, column
lowerCAmelCase = [[default_value for c in range(UpperCAmelCase__ )] for r in range(UpperCAmelCase__ )]
def __str__( self : List[str] ) -> str:
        lowerCAmelCase = F'''Matrix consists of {self.row} rows and {self.column} columns\n'''
# Make string identifier
lowerCAmelCase = 0
for row_vector in self.array:
for obj in row_vector:
lowerCAmelCase = max(UpperCAmelCase__ , len(str(UpperCAmelCase__ ) ) )
lowerCAmelCase = F'''%{max_element_length}s'''
# Make string and return
def single_line(UpperCAmelCase__ : list[float] ) -> str:
nonlocal string_format_identifier
lowerCAmelCase = '['
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(UpperCAmelCase__ ) for row_vector in self.array )
return s
def __repr__( self : List[str] ) -> str:
return str(self )
    def validate_indices( self : Optional[int] , UpperCAmelCase__ : tuple[int, int] ) -> bool:
if not (isinstance(UpperCAmelCase__ , (list, tuple) ) and len(UpperCAmelCase__ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Any , UpperCAmelCase__ : tuple[int, int] ) -> Any:
        assert self.validate_indices(UpperCAmelCase__ )
return self.array[loc[0]][loc[1]]
def __setitem__( self : Dict , UpperCAmelCase__ : tuple[int, int] , UpperCAmelCase__ : float ) -> None:
        assert self.validate_indices(UpperCAmelCase__ )
lowerCAmelCase = value
def __add__( self : Any , UpperCAmelCase__ : Matrix ) -> Matrix:
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert self.row == another.row and self.column == another.column
# Add
lowerCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = self[r, c] + another[r, c]
return result
def __neg__( self : int ) -> Matrix:
lowerCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = -self[r, c]
return result
def __sub__( self : str , UpperCAmelCase__ : Matrix ) -> Matrix:
return self + (-another)
def __mul__( self : str , UpperCAmelCase__ : int | float | Matrix ) -> Matrix:
if isinstance(UpperCAmelCase__ , (int, float) ): # Scalar multiplication
lowerCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = self[r, c] * another
return result
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): # Matrix multiplication
assert self.column == another.row
lowerCAmelCase = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
lowerCAmelCase = F'''Unsupported type given for another ({type(UpperCAmelCase__ )})'''
raise TypeError(UpperCAmelCase__ )
    def transpose( self : Optional[Any] ) -> Matrix:
lowerCAmelCase = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = self[r, c]
return result
    def sherman_morrison( self : List[str] , UpperCAmelCase__ : Matrix , UpperCAmelCase__ : Matrix ) -> Any:
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
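        # Sherman-Morrison: if this matrix already holds A^(-1), then
        #   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
        # which is exactly what the lines below assemble.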
# Calculate
lowerCAmelCase = v.transpose()
lowerCAmelCase = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def test1( ):
# a^(-1)
        ainv = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(f'''a^(-1) is {ainv}''' )
        # u, v
        u = Matrix(3 , 1 , 0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3 , 1 , 0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
print(f'''u is {u}''' )
print(f'''v is {v}''' )
print(f'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
        print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}''' )
    def test2( ):
import doctest
doctest.testmod()
    test2()
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
__snake_case =tuple[int, int]
class UpperCAmelCase_ :
def __init__( self : Optional[int] , UpperCAmelCase__ : set[int] , UpperCAmelCase__ : Mapping[EdgeT, int] ) -> None:
lowerCAmelCase = vertices
lowerCAmelCase = {
(min(UpperCAmelCase__ ), max(UpperCAmelCase__ )): weight for edge, weight in edges.items()
}
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : EdgeT , UpperCAmelCase__ : int ) -> None:
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
lowerCAmelCase = weight
def __UpperCAmelCase ( self : List[Any] ) -> Graph:
lowerCAmelCase = Graph({min(self.vertices )} , {} )
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 42
while len(subgraph.vertices ) < len(self.vertices ):
lowerCAmelCase = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
lowerCAmelCase = edge
lowerCAmelCase = weight
subgraph.add_edge(UpperCAmelCase__ , UpperCAmelCase__ )
return subgraph
def solution( lowerCamelCase : str = "p107_network.txt" ):
lowerCAmelCase = os.path.abspath(os.path.dirname(lowerCamelCase ) )
lowerCAmelCase = os.path.join(lowerCamelCase , lowerCamelCase )
lowerCAmelCase = {}
lowerCAmelCase = 42
lowerCAmelCase = 42
lowerCAmelCase = 42
with open(lowerCamelCase ) as f:
lowerCAmelCase = f.read().strip().split('\n' )
lowerCAmelCase = [line.split(',' ) for line in data]
for edgea in range(1 , len(lowerCamelCase ) ):
for edgea in range(lowerCamelCase ):
if adjaceny_matrix[edgea][edgea] != "-":
lowerCAmelCase = int(adjaceny_matrix[edgea][edgea] )
lowerCAmelCase = Graph(set(range(len(lowerCamelCase ) ) ) , lowerCamelCase )
lowerCAmelCase = graph.prims_algorithm()
lowerCAmelCase = sum(graph.edges.values() )
lowerCAmelCase = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(F'''{solution() = }''')
| 4 |
'''simple docstring'''
class UpperCAmelCase_ :
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
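# Illustrative usage (not in the original file): for [1, 2, 3] the prefix sums
# are [1, 3, 6]; the slice [1..2] sums to 5, and a contiguous subarray with
# that sum exists ([2, 3]).
# ps = UpperCAmelCase_([1, 2, 3])
# assert ps.get_sum(1, 2) == 5 and ps.contains_sum(5)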
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 1 |
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=512,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
def parse_bool( string : str ):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f'''could not parse string as bool {string}''' )
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
__snake_case =parser.parse_args()
__snake_case =download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 4 |
'''simple docstring'''
def a_ ( lowerCamelCase : Optional[Any] ):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def a_ ( lowerCamelCase : dict[int, list[int]] ):
lowerCAmelCase = 0
lowerCAmelCase = len(lowerCamelCase ) # No of vertices in graph
lowerCAmelCase = [0] * n
lowerCAmelCase = [False] * n
def dfs(lowerCamelCase : Tuple , lowerCamelCase : str , lowerCamelCase : Dict , lowerCamelCase : str ):
lowerCAmelCase = True
lowerCAmelCase = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(lowerCamelCase , lowerCamelCase , lowerCamelCase , id_ )
lowerCAmelCase = min(low[at] , low[to] )
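                # low[to] now holds the smallest discovery id reachable from
                # to's subtree; if that subtree cannot reach `at` or any
                # ancestor of `at` (i.e. id_ <= low[to]), removing (at, to)
                # disconnects the graph, so the edge is recorded as a bridge.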
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowerCAmelCase = min(low[at] , low[to] )
lowerCAmelCase = []
for i in range(lowerCamelCase ):
if not visited[i]:
dfs(lowerCamelCase , -1 , lowerCamelCase , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Tuple = '''rwkv'''
lowerCamelCase : Dict = {'''max_position_embeddings''': '''context_length'''}
def __init__( self : str , UpperCAmelCase__ : List[str]=5_0_2_7_7 , UpperCAmelCase__ : List[str]=1_0_2_4 , UpperCAmelCase__ : List[Any]=4_0_9_6 , UpperCAmelCase__ : Optional[Any]=3_2 , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : List[Any]=1E-5 , UpperCAmelCase__ : str=0 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : Optional[Any]=6 , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : List[Any]=True , **UpperCAmelCase__ : Optional[int] , ) -> Union[str, Any]:
lowerCAmelCase = vocab_size
lowerCAmelCase = context_length
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = attention_hidden_size if attention_hidden_size is not None else hidden_size
lowerCAmelCase = intermediate_size if intermediate_size is not None else 4 * hidden_size
lowerCAmelCase = layer_norm_epsilon
lowerCAmelCase = rescale_every
lowerCAmelCase = use_cache
lowerCAmelCase = bos_token_id
lowerCAmelCase = eos_token_id
super().__init__(
tie_word_embeddings=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
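# Illustrative sketch (an addition, not in the original file): leaving
# attention_hidden_size and intermediate_size as None makes them default to
# hidden_size and 4 * hidden_size respectively.
# cfg = UpperCAmelCase_(vocab_size=50_277, context_length=1_024, hidden_size=768)
# assert cfg.intermediate_size == 4 * cfg.hidden_size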
| 4 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case =logging.get_logger(__name__)
def rename_keys( lowerCamelCase : Any ):
lowerCAmelCase = OrderedDict()
for key, value in state_dict.items():
if key.startswith('module.encoder' ):
lowerCAmelCase = key.replace('module.encoder' , 'glpn.encoder' )
if key.startswith('module.decoder' ):
lowerCAmelCase = key.replace('module.decoder' , 'decoder.stages' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase = key[key.find('patch_embed' ) + len('patch_embed' )]
lowerCAmelCase = key.replace(f'''patch_embed{idx}''' , f'''patch_embeddings.{int(lowerCamelCase )-1}''' )
if "norm" in key:
lowerCAmelCase = key.replace('norm' , 'layer_norm' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
lowerCAmelCase = key.replace(f'''layer_norm{idx}''' , f'''layer_norm.{int(lowerCamelCase )-1}''' )
if "layer_norm1" in key:
lowerCAmelCase = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
lowerCAmelCase = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase = key[key.find('block' ) + len('block' )]
lowerCAmelCase = key.replace(f'''block{idx}''' , f'''block.{int(lowerCamelCase )-1}''' )
if "attn.q" in key:
lowerCAmelCase = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
lowerCAmelCase = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
lowerCAmelCase = key.replace('attn' , 'attention.self' )
if "fc1" in key:
lowerCAmelCase = key.replace('fc1' , 'dense1' )
if "fc2" in key:
lowerCAmelCase = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
lowerCAmelCase = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
lowerCAmelCase = key.replace('linear_fuse.conv' , 'linear_fuse' )
lowerCAmelCase = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase = key[key.find('linear_c' ) + len('linear_c' )]
lowerCAmelCase = key.replace(f'''linear_c{idx}''' , f'''linear_c.{int(lowerCamelCase )-1}''' )
if "bot_conv" in key:
lowerCAmelCase = key.replace('bot_conv' , '0.convolution' )
if "skip_conv1" in key:
lowerCAmelCase = key.replace('skip_conv1' , '1.convolution' )
if "skip_conv2" in key:
lowerCAmelCase = key.replace('skip_conv2' , '2.convolution' )
if "fusion1" in key:
lowerCAmelCase = key.replace('fusion1' , '1.fusion' )
if "fusion2" in key:
lowerCAmelCase = key.replace('fusion2' , '2.fusion' )
if "fusion3" in key:
lowerCAmelCase = key.replace('fusion3' , '3.fusion' )
if "fusion" in key and "conv" in key:
lowerCAmelCase = key.replace('conv' , 'convolutional_layer' )
if key.startswith('module.last_layer_depth' ):
lowerCAmelCase = key.replace('module.last_layer_depth' , 'head.head' )
lowerCAmelCase = value
return new_state_dict
def read_in_k_v( lowerCamelCase : List[str] , lowerCamelCase : str ):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
lowerCAmelCase = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase = kv_bias[config.hidden_sizes[i] :]
def prepare_img( ):
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url , stream=True ).raw )
return image
@torch.no_grad()
def convert_glpn_checkpoint( lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any]=False , lowerCamelCase : List[str]=None ):
lowerCAmelCase = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
lowerCAmelCase = GLPNImageProcessor()
# prepare image
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=lowerCamelCase , return_tensors='pt' ).pixel_values
logger.info('Converting model...' )
# load original state dict
lowerCAmelCase = torch.load(lowerCamelCase , map_location=torch.device('cpu' ) )
# rename keys
lowerCAmelCase = rename_keys(lowerCamelCase )
# key and value matrices need special treatment
read_in_k_v(lowerCamelCase , lowerCamelCase )
# create HuggingFace model and load state dict
lowerCAmelCase = GLPNForDepthEstimation(lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
# forward pass
lowerCAmelCase = model(lowerCamelCase )
lowerCAmelCase = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
lowerCAmelCase = torch.tensor(
[[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
elif "kitti" in model_name:
lowerCAmelCase = torch.tensor(
[[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
else:
raise ValueError(f'''Unknown model name: {model_name}''' )
lowerCAmelCase = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , lowerCamelCase , atol=1e-4 )
print('Looks ok!' )
# finally, push to hub if required
if push_to_hub:
logger.info('Pushing model and image processor to the hub...' )
model.push_to_hub(
repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCamelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase , )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
__snake_case =parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
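# Example invocation (illustrative; the script and checkpoint names are
# placeholders):
#   python convert_glpn_to_pytorch.py --checkpoint_path /path/to/glpn_kitti.pth \
#       --model_name glpn-kitti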
| 4 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config( model_name : Optional[int] ):
    config = SwinConfig(image_size=192 )
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError('Model not supported, only supports base and large variants' )
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
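# NOTE (added example): quick sanity check of the config builder above for the
# base checkpoint name this script accepts:
def _demo_get_swin_config():
    config = get_swin_config('swin-base-simmim-window6-192' )
    assert config.embed_dim == 128 and config.window_size == 6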
def rename_key( name : Any ):
    if "encoder.mask_token" in name:
        name = name.replace('encoder.mask_token' , 'embeddings.mask_token' )
    if "encoder.patch_embed.proj" in name:
        name = name.replace('encoder.patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "encoder.patch_embed.norm" in name:
        name = name.replace('encoder.patch_embed.norm' , 'embeddings.norm' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if name == "encoder.norm.weight":
        name = 'layernorm.weight'
    if name == "encoder.norm.bias":
        name = 'layernorm.bias'
    if "decoder" in name:
        pass
    else:
        name = 'swin.' + name
    return name
def convert_state_dict( orig_state_dict : Optional[Any] , model : Any ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split('.' )
            layer_num = int(key_split[2] )
            block_num = int(key_split[4] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'''] = val[:dim, :]
                orig_state_dict[f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'''] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'''] = val[
                    :dim
                ]
                orig_state_dict[f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'''] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'''] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
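# NOTE (added example): fused "qkv" parameters stack query, key and value along
# dim 0, so a (3 * dim, dim) weight is cut into three (dim, dim) blocks exactly
# as above. Minimal sketch with a toy dim of 4 (an illustrative assumption):
def _demo_split_fused_qkv():
    import torch

    dim = 4
    qkv_weight = torch.randn(3 * dim , dim )
    query = qkv_weight[:dim, :]
    key = qkv_weight[dim : dim * 2, :]
    value = qkv_weight[-dim:, :]
    assert query.shape == key.shape == value.shape == (dim, dim)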
def convert_swin_checkpoint( model_name : Dict , checkpoint_path : str , pytorch_dump_folder_path : Optional[int] , push_to_hub : Dict ):
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    config = get_swin_config(model_name )
    model = SwinForMaskedImageModeling(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image_processor = ViTImageProcessor(size={'height': 192, 'width': 192} )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='pt' )
    with torch.no_grad():
        outputs = model(**inputs ).logits
    print(outputs.keys() )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'''Pushing model and image processor for {model_name} to hub''' )
        model.push_to_hub(f'''microsoft/{model_name}''' )
        image_processor.push_to_hub(f'''microsoft/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""swin-base-simmim-window6-192""",
type=str,
choices=["""swin-base-simmim-window6-192""", """swin-large-simmim-window12-192"""],
help="""Name of the Swin SimMIM model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth""",
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 4 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
@slow
    def test_xlm_roberta_base( self : str ) -> List[str]:
        model = XLMRobertaModel.from_pretrained('xlm-roberta-base' )
        input_ids = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 1_2, 7_6_8) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )['last_hidden_state'].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
    @slow
    def test_xlm_roberta_large( self : List[Any] ) -> Tuple:
        model = XLMRobertaModel.from_pretrained('xlm-roberta-large' )
        input_ids = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 1_2, 1_0_2_4) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )['last_hidden_state'].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
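# NOTE (added example): both tests above follow the same recipe -- run the model
# under torch.no_grad(), check the output shape, then compare a slice of the
# last hidden dimension against reference values within a small tolerance. A
# self-contained sketch of that assertion pattern (random tensors stand in for
# a real model's output and the published reference values):
def _demo_slice_comparison():
    expected_shape = torch.Size((1, 12, 768) )
    output = torch.randn(*expected_shape )
    reference = output[:, :, -1].clone()  # stand-in for hard-coded expected values
    assert output.shape == expected_shape
    assert torch.allclose(output[:, :, -1] , reference , atol=1E-3 )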
| 4 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : List[str] = '''dpr'''
    def __init__( self : List[Any] , vocab_size : Tuple=3_0_5_2_2 , hidden_size : Tuple=7_6_8 , num_hidden_layers : str=1_2 , num_attention_heads : str=1_2 , intermediate_size : Tuple=3_0_7_2 , hidden_act : Any="gelu" , hidden_dropout_prob : List[Any]=0.1 , attention_probs_dropout_prob : Union[str, Any]=0.1 , max_position_embeddings : Optional[int]=5_1_2 , type_vocab_size : Any=2 , initializer_range : Union[str, Any]=0.02 , layer_norm_eps : Dict=1E-12 , pad_token_id : Tuple=0 , position_embedding_type : Union[str, Any]="absolute" , projection_dim : int = 0 , **kwargs : Union[str, Any] , ) -> List[str]:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
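# NOTE (added example): the config above (DPRConfig upstream; the class name is
# obfuscated here) is a plain container, so instantiating it with no arguments
# yields the BERT-base-style defaults declared in __init__:
def _demo_dpr_config_defaults():
    config = UpperCAmelCase_()
    assert config.hidden_size == 7_6_8
    assert config.projection_dim == 0  # 0 means no extra projection on top of the encoder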
| 4 |
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize( example : Dict ):
    output = {}
    output['input_ids'] = tokenizer(example['content'] , truncation=False )['input_ids']
    output['ratio_char_token'] = len(example['content'] ) / len(output['input_ids'] )
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split="""train""")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
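# NOTE (added example): ds.map fans the tokenize function out over worker
# processes (num_proc) and drops the raw columns afterwards. A self-contained
# sketch of the same map/remove_columns pattern on an in-memory toy dataset
# (single process, so a lambda is fine; num_proc > 1 needs a picklable function):
def _demo_parallel_map():
    from datasets import Dataset

    ds_demo = Dataset.from_dict({'content': ['def f(): pass' , 'print(1)']} )
    ds_demo = ds_demo.map(lambda ex: {'n_chars': len(ex['content'] )} , remove_columns=['content'] )
    assert ds_demo[0]['n_chars'] == 13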
| 4 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
lowerCamelCase : List[str] = RoCBertTokenizer
lowerCamelCase : Tuple = None
lowerCamelCase : int = False
lowerCamelCase : Tuple = True
lowerCamelCase : int = filter_non_english
def __UpperCAmelCase ( self : Dict ) -> Dict:
super().setUp()
lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
lowerCAmelCase = {}
lowerCAmelCase = {}
for i, value in enumerate(UpperCAmelCase__ ):
lowerCAmelCase = i
lowerCAmelCase = i
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
lowerCAmelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCAmelCase = tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(UpperCAmelCase__ , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase__ ) , [5, 6, 2, 5, 7, 8] )
def __UpperCAmelCase ( self : Any ) -> str:
lowerCAmelCase = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __UpperCAmelCase ( self : str ) -> List[Any]:
lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __UpperCAmelCase ( self : Any ) -> str:
lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase__ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def __UpperCAmelCase ( self : str ) -> List[str]:
lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
lowerCAmelCase = {}
for i, token in enumerate(UpperCAmelCase__ ):
lowerCAmelCase = i
lowerCAmelCase = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase__ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def __UpperCAmelCase ( self : Any ) -> Tuple:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
lowerCAmelCase = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(UpperCAmelCase__ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
lowerCAmelCase = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(UpperCAmelCase__ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def __UpperCAmelCase ( self : Dict ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
lowerCAmelCase = tokenizer_r.encode_plus(
UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , )
lowerCAmelCase = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase__ , 'do_lower_case' ) else False
lowerCAmelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'Allen'),
((2_1, 2_3), '##NL'),
((2_3, 2_4), '##P'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'allen'),
((2_1, 2_3), '##nl'),
((2_3, 2_4), '##p'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def __UpperCAmelCase ( self : str ) -> Tuple:
lowerCAmelCase = ['的', '人', '有']
lowerCAmelCase = ''.join(UpperCAmelCase__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase = True
lowerCAmelCase = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = False
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__ )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCAmelCase = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(UpperCAmelCase__ )
]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
lowerCAmelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCAmelCase = tokenizer.encode('你好' , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer.encode('你是谁' , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
lowerCAmelCase = self.get_tokenizers(do_lower_case=UpperCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowerCAmelCase = '你好,你是谁'
lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase__ )
lowerCAmelCase = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
lowerCAmelCase = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase__ )
lowerCAmelCase = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase__ )
lowerCAmelCase = tokenizer.prepare_for_model(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
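# NOTE (added example): the distinctive part of RoCBert is that every token is
# looked up in three parallel vocabularies -- ordinary ids, shape ids and
# pronunciation ids -- as the tests above exercise. Minimal sketch of that
# triple lookup with toy dictionaries (not real RoCBert vocabulary data):
def _demo_triple_lookup():
    vocab = {'你': 5, '好': 6}
    shape_vocab = {'你': 5, '好': 6}
    pron_vocab = {'你': 5, '好': 6}
    tokens = ['你', '好']
    assert [vocab[t] for t in tokens] == [shape_vocab[t] for t in tokens] == [pron_vocab[t] for t in tokens] == [5, 6]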
| 4 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case =logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : bool = field(default=__lowercase , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
lowerCamelCase : bool = field(
default=__lowercase , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
lowerCamelCase : Optional[int] = field(
default=__lowercase , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
lowerCamelCase : Optional[int] = field(
default=__lowercase , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
lowerCamelCase : Optional[Union[str, Path, GenerationConfig]] = field(
default=__lowercase , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
    def to_dict( self : Dict ) -> List[str]:
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig ):
                d[k] = v.to_dict()
        return d
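# NOTE (added example): the to_dict override above recursively converts any
# GenerationConfig-valued field into a plain dict so the arguments stay
# JSON-serializable. Sketch of the same pattern on a toy object (names here are
# illustrative, not part of the real API):
def _demo_nested_to_dict():
    class _Inner:
        def to_dict(self ):
            return {'x': 1}

    d = {'a': 2, 'gen': _Inner()}
    d = {k: (v.to_dict() if hasattr(v , 'to_dict' ) else v) for k, v in d.items()}
    assert d == {'a': 2, 'gen': {'x': 1}}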
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase_ :
def __init__( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : str=1_3 , UpperCAmelCase__ : Dict=3_0 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=3_2 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : Dict=4 , UpperCAmelCase__ : List[Any]=3_7 , UpperCAmelCase__ : Any="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[Any]=1_0 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : Optional[Any]=0.6 , UpperCAmelCase__ : str=None , ) -> Any:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = is_training
lowerCAmelCase = use_labels
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = mask_ratio
lowerCAmelCase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowerCAmelCase = (image_size // patch_size) ** 2
lowerCAmelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __UpperCAmelCase ( self : str ) -> Tuple:
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self : Tuple ) -> int:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any ) -> Tuple:
lowerCAmelCase = TFViTMAEModel(config=UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ , training=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> List[str]:
lowerCAmelCase = TFViTMAEForPreTraining(UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ , training=UpperCAmelCase__ )
# expected sequence length = num_patches
lowerCAmelCase = (self.image_size // self.patch_size) ** 2
lowerCAmelCase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowerCAmelCase = 1
lowerCAmelCase = TFViTMAEForPreTraining(UpperCAmelCase__ )
lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase = model(UpperCAmelCase__ , training=UpperCAmelCase__ )
lowerCAmelCase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
lowerCAmelCase = self.prepare_config_and_inputs()
((lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase)) = config_and_inputs
lowerCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
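# NOTE (added example): with mask ratio r, ViTMAE's encoder only sees
# ceil((1 - r) * (num_patches + 1)) tokens (the +1 is the [CLS] token), which is
# what the tester computes above. Worked numbers for the defaults used here
# (image size 30, patch size 2, mask_ratio 0.6):
def _demo_visible_seq_length():
    num_patches = (30 // 2) ** 2  # 225
    seq_length = int(math.ceil((1 - 0.6) * (num_patches + 1) ) )  # ceil(0.4 * 226) = 91
    assert (num_patches, seq_length) == (225, 91)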
@require_tf
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
lowerCamelCase : str = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowerCamelCase : List[str] = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
lowerCamelCase : List[str] = False
lowerCamelCase : Optional[int] = False
lowerCamelCase : Optional[int] = False
lowerCamelCase : str = False
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
lowerCAmelCase = TFViTMAEModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=3_7 )
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
pass
def __UpperCAmelCase ( self : int ) -> Dict:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(UpperCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase__ , tf.keras.layers.Layer ) )
def __UpperCAmelCase ( self : Any ) -> Tuple:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(UpperCAmelCase__ )
lowerCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def __UpperCAmelCase ( self : str ) -> int:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : str ) -> List[Any]:
# make the mask reproducible
np.random.seed(2 )
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = int((config.image_size // config.patch_size) ** 2 )
lowerCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(UpperCAmelCase__ )
lowerCAmelCase = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ , noise=UpperCAmelCase__ )
lowerCAmelCase = copy.deepcopy(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
lowerCAmelCase = model(**UpperCAmelCase__ , noise=UpperCAmelCase__ )
lowerCAmelCase = outputs_dict[0].numpy()
lowerCAmelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
# make the mask reproducible
np.random.seed(2 )
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = int((config.image_size // config.patch_size) ** 2 )
lowerCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(UpperCAmelCase__ : Optional[int] ):
lowerCAmelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(UpperCAmelCase__ ):
lowerCAmelCase = v.numpy()
else:
lowerCAmelCase = np.array(UpperCAmelCase__ )
return inputs_np_dict
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(UpperCAmelCase__ )
lowerCAmelCase = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = prepare_numpy_arrays(UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ , noise=UpperCAmelCase__ )
lowerCAmelCase = model(**UpperCAmelCase__ , noise=UpperCAmelCase__ )
self.assert_outputs_same(UpperCAmelCase__ , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] ) -> Tuple:
# make masks reproducible
np.random.seed(2 )
lowerCAmelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowerCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCAmelCase = tf.constant(UpperCAmelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowerCAmelCase = tf_noise
super().check_pt_tf_models(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def __UpperCAmelCase ( self : str ) -> Tuple:
# make mask reproducible
np.random.seed(2 )
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(UpperCAmelCase__ )
if module_member_name.endswith('MainLayer' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
for module_member in (getattr(UpperCAmelCase__ , UpperCAmelCase__ ),)
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(UpperCAmelCase__ , '_keras_serializable' , UpperCAmelCase__ )
}
lowerCAmelCase = int((config.image_size // config.patch_size) ** 2 )
lowerCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCAmelCase = tf.convert_to_tensor(UpperCAmelCase__ )
inputs_dict.update({'noise': noise} )
for main_layer_class in tf_main_layer_classes:
lowerCAmelCase = main_layer_class(UpperCAmelCase__ )
lowerCAmelCase = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowerCAmelCase = tf.keras.Model(UpperCAmelCase__ , outputs=main_layer(UpperCAmelCase__ ) )
lowerCAmelCase = model(UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase = os.path.join(UpperCAmelCase__ , 'keras_model.h5' )
model.save(UpperCAmelCase__ )
lowerCAmelCase = tf.keras.models.load_model(
UpperCAmelCase__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(UpperCAmelCase__ , tf.keras.Model )
lowerCAmelCase = model(UpperCAmelCase__ )
self.assert_outputs_same(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
# make mask reproducible
np.random.seed(2 )
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = int((config.image_size // config.patch_size) ** 2 )
lowerCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(UpperCAmelCase__ )
lowerCAmelCase = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ , noise=UpperCAmelCase__ )
if model_class.__name__ == "TFViTMAEModel":
lowerCAmelCase = outputs.last_hidden_state.numpy()
lowerCAmelCase = 0
else:
lowerCAmelCase = outputs.logits.numpy()
lowerCAmelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase__ , saved_model=UpperCAmelCase__ )
lowerCAmelCase = model_class.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ , noise=UpperCAmelCase__ )
if model_class.__name__ == "TFViTMAEModel":
lowerCAmelCase = after_outputs['last_hidden_state'].numpy()
lowerCAmelCase = 0
else:
lowerCAmelCase = after_outputs['logits'].numpy()
lowerCAmelCase = 0
lowerCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCAmelCase__ , 1E-5 )
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
# make mask reproducible
np.random.seed(2 )
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = int((config.image_size // config.patch_size) ** 2 )
lowerCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(UpperCAmelCase__ )
lowerCAmelCase = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ , noise=UpperCAmelCase__ )
lowerCAmelCase = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(UpperCAmelCase__ )
lowerCAmelCase = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowerCAmelCase = model_class.from_config(model.config )
lowerCAmelCase = new_model(UpperCAmelCase__ ) # Build model
new_model.set_weights(model.get_weights() )
lowerCAmelCase = new_model(UpperCAmelCase__ , noise=UpperCAmelCase__ )
self.assert_outputs_same(UpperCAmelCase__ , UpperCAmelCase__ )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
pass
@slow
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
lowerCAmelCase = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(UpperCAmelCase__ )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowerCAmelCase = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=UpperCAmelCase__ , return_tensors='tf' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowerCAmelCase = ViTMAEConfig()
lowerCAmelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowerCAmelCase = np.random.uniform(size=(1, num_patches) )
# forward pass
lowerCAmelCase = model(**UpperCAmelCase__ , noise=UpperCAmelCase__ )
# verify the logits
lowerCAmelCase = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
lowerCAmelCase = tf.convert_to_tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , UpperCAmelCase__ , atol=1E-4 )
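# NOTE (added example): the tests above make ViTMAE's random masking
# deterministic by seeding numpy and passing one explicit noise tensor to every
# forward call. Minimal sketch of why reseeding reproduces the same noise
# (pure numpy, no model required):
def _demo_reproducible_noise():
    np.random.seed(2 )
    first = np.random.uniform(size=(1, 196) )
    np.random.seed(2 )
    second = np.random.uniform(size=(1, 196) )
    assert np.array_equal(first , second )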
| 4 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("""transformers.models.encodec""")
MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively( hf_pointer : Optional[Any] , key : Union[str, Any] , value : int , full_name : Any , weight_type : List[str] ):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def should_ignore( name : Optional[Any] , ignore_keys : Optional[Any] ):
    for key in ignore_keys:
        if key.endswith('.*' ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix , suffix = key.split('.*.' )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
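# NOTE (added example): the helper above supports three patterns -- a trailing
# ".*" prefix match, an infix ".*." prefix/suffix match, and a plain substring
# match. Quick sanity checks with toy key names (not real EnCodec parameters):
def _demo_should_ignore():
    assert should_ignore('decoder.model.0.conv' , ['decoder.*'] )
    assert should_ignore('quantizer.vq.layers.3.embed' , ['quantizer.*.embed'] )
    assert should_ignore('encoder.lstm.bias' , ['lstm'] )
    assert not should_ignore('encoder.conv.weight' , ['lstm'] )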
def recursively_load_weights( orig_dict : List[Any] , hf_model : Any , model_name : str ):
    unused_weights = []
    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f'''Unsupported model: {model_name}''' )
    for name, value in orig_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(f'''{name} was ignored''' )
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix , suffix = key.split('.*.' )
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith('embed' ) and name.endswith('embed_avg' ):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key )[0].split('.' )[-2]
                    mapped_key = mapped_key.replace('*' , layer_index )
                if "weight_g" in name:
                    weight_type = 'weight_g'
                elif "weight_v" in name:
                    weight_type = 'weight_v'
                elif "weight_ih_l0" in name:
                    weight_type = 'weight_ih_l0'
                elif "weight_hh_l0" in name:
                    weight_type = 'weight_hh_l0'
                elif "bias_ih_l0" in name:
                    weight_type = 'bias_ih_l0'
                elif "bias_hh_l0" in name:
                    weight_type = 'bias_hh_l0'
                elif "weight_ih_l1" in name:
                    weight_type = 'weight_ih_l1'
                elif "weight_hh_l1" in name:
                    weight_type = 'weight_hh_l1'
                elif "bias_ih_l1" in name:
                    weight_type = 'bias_ih_l1'
                elif "bias_hh_l1" in name:
                    weight_type = 'bias_hh_l1'
                elif "bias" in name:
                    weight_type = 'bias'
                elif "weight" in name:
                    weight_type = 'weight'
                elif "running_mean" in name:
                    weight_type = 'running_mean'
                elif "running_var" in name:
                    weight_type = 'running_var'
                elif "num_batches_tracked" in name:
                    weight_type = 'num_batches_tracked'
                else:
                    weight_type = None
                set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
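# NOTE (added example): when a mapping value contains "*", the loader recovers
# the layer index from the original parameter name and substitutes it in. A
# sketch of that substitution step with toy names; the extraction below is a
# simplified assumption that differs slightly from the split(key) call above:
def _demo_layer_index_substitution():
    name = 'quantizer.vq.layers.3._codebook.embed'
    mapped_key = 'quantizer.layers.*.codebook.embed'
    layer_index = name.split('layers.' )[-1].split('.' )[0]
    assert mapped_key.replace('*' , layer_index ) == 'quantizer.layers.3.codebook.embed'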
@torch.no_grad()
def convert_checkpoint( model_name : Optional[int] , checkpoint_path : Union[str, Any] , pytorch_dump_folder_path : str , config_path : Dict=None , repo_id : Union[str, Any]=None , ):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path )
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = 'time_group_norm'
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f'''Unknown model name: {model_name}''' )
    model = EncodecModel(config )
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
    original_checkpoint = torch.load(checkpoint_path )
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint['best_state']
    recursively_load_weights(original_checkpoint , model , model_name )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('Pushing to the hub...' )
        feature_extractor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 4 | 1 |
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env( key : Optional[int] , default : Optional[Any]=False ):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''' )
    return _value
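# NOTE (added example): quick usage check for the helper above -- strtobool
# accepts 1/0/yes/no/true/false, so a suite can be driven from the shell. The
# environment variable names below are made up for the demo:
def _demo_parse_flag():
    os.environ['ACCELERATE_DEMO_FLAG'] = 'yes'
    assert parse_flag_from_env('ACCELERATE_DEMO_FLAG' ) == 1
    assert parse_flag_from_env('ACCELERATE_DEMO_UNSET' , default=False ) is False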
_run_slow_tests = parse_flag_from_env("""RUN_SLOW""", default=False)
def a_ ( lowerCamelCase : Any ):
return unittest.skip('Test was skipped' )(lowerCamelCase )
def a_ ( lowerCamelCase : List[Any] ):
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(lowerCamelCase )
def a_ ( lowerCamelCase : Dict ):
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(lowerCamelCase )
def a_ ( lowerCamelCase : Any ):
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(lowerCamelCase )
def a_ ( lowerCamelCase : Union[str, Any] ):
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(lowerCamelCase )
def a_ ( lowerCamelCase : Optional[Any] ):
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(lowerCamelCase )
def a_ ( lowerCamelCase : Optional[int] ):
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(lowerCamelCase )
def a_ ( lowerCamelCase : List[str] ):
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(lowerCamelCase )
def a_ ( lowerCamelCase : Union[str, Any] ):
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(lowerCamelCase )
def a_ ( lowerCamelCase : Union[str, Any] ):
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(lowerCamelCase )
def a_ ( lowerCamelCase : str ):
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(lowerCamelCase )
def a_ ( lowerCamelCase : str ):
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(lowerCamelCase )
def a_ ( lowerCamelCase : Union[str, Any] ):
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(lowerCamelCase )
def a_ ( lowerCamelCase : Tuple ):
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(lowerCamelCase )
def a_ ( lowerCamelCase : Optional[int] ):
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(lowerCamelCase )
def a_ ( lowerCamelCase : str ):
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(lowerCamelCase )
def a_ ( lowerCamelCase : Optional[int]=None , lowerCamelCase : List[str]=None ):
if test_case is None:
return partial(lowerCamelCase , version=lowerCamelCase )
return unittest.skipUnless(is_torch_version('>=' , lowerCamelCase ) , f'''test requires torch version >= {version}''' )(lowerCamelCase )
def a_ ( lowerCamelCase : Union[str, Any] ):
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(lowerCamelCase )
def a_ ( lowerCamelCase : str ):
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(lowerCamelCase )
def a_ ( lowerCamelCase : str ):
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(lowerCamelCase )
_atleast_one_tracker_available = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def a_ ( lowerCamelCase : Union[str, Any] ):
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(lowerCamelCase )
class UpperCAmelCase_ ( unittest.TestCase ):
    clear_on_setup = True
    @classmethod
    def __UpperCAmelCase ( cls : List[Any] ) -> Any:
        cls.tmpdir = tempfile.mkdtemp()
@classmethod
def __UpperCAmelCase ( cls : Any ) -> Union[str, Any]:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('**/*' ):
if path.is_file():
path.unlink()
elif path.is_dir():
                    shutil.rmtree(path )
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : Union[mock.Mock, List[mock.Mock]] ) -> Optional[int]:
        self.mocks = UpperCAmelCase__ if isinstance(UpperCAmelCase__ , (tuple, list) ) else [UpperCAmelCase__]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def a_ ( tensor ):
    state = AcceleratorState()
    tensors = tensor[None].clone().to(state.device )
    tensors = gather(tensors ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
class _RunOutput:
    def __init__( self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream( stream , callback ):
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ):
    if echo:
        print('\nRunning: ' , ' '.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label="" ):
        line = line.decode('utf-8' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l : tee(l , out , sys.stdout , label='stdout:' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l : tee(l , err , sys.stderr , label='stderr:' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def a_ ( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ):
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ' '.join(cmd )
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr )
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}''' )
    return result
class SubprocessCallException(Exception ):
    pass
def a_ ( command , return_stdout=False ):
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , 'decode' ):
                output = output.decode('utf-8' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f'''Command `{' '.join(command )}` failed with the following error:\n\n{e.output.decode()}''' ) from e
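# A minimal usage sketch (added; not part of the original module). It assumes
# the helpers above are importable as fixed here, and that `a_` resolves to the
# last-defined helper (the run_command-style wrapper); the command and the
# RUN_SLOW variable are illustrative only.
if __name__ == "__main__":
    # Honor the RUN_SLOW convention consumed by the slow-test skip decorator above.
    print("slow tests enabled:", parse_flag_from_env("RUN_SLOW", default=False))
    # Run a trivial subprocess and capture its decoded stdout.
    print(a_(["echo", "hello"], return_stdout=True))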
| 4 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__snake_case =logging.get_logger(__name__)
class UpperCAmelCase_ ( __lowercase ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
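# Demonstration sketch (added): instantiating the deprecated class should emit
# a FutureWarning and otherwise behave like FlavaImageProcessor; constructor
# defaults are assumed sufficient here.
if __name__ == "__main__":
    import warnings as _warnings
    with _warnings.catch_warnings(record=True) as caught:
        _warnings.simplefilter("always")
        UpperCAmelCase_()
        print(caught[0].category.__name__)  # expected: FutureWarning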
| 4 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=__lowercase ):
lowerCamelCase : int = ['''onnx''']
    def __init__( self , *args , **kwargs ) -> Dict:
        requires_backends(self , ['onnx'] )
    @classmethod
    def __UpperCAmelCase ( cls , *args , **kwargs ) -> Union[str, Any]:
        requires_backends(cls , ['onnx'] )
    @classmethod
    def __UpperCAmelCase ( cls , *args , **kwargs ) -> Union[str, Any]:
        requires_backends(cls , ['onnx'] )
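# Illustrative sketch (added): the dummy object exists only to fail fast. Any
# instantiation routes through `requires_backends`, which raises an ImportError
# with installation instructions unless the `onnx` backend is present.
if __name__ == "__main__":
    try:
        UpperCAmelCase_()
    except ImportError as err:
        print("onnx backend missing:", err)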
| 4 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
__snake_case ={
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
__snake_case ={
"""facebook/blenderbot_small-90M""": 512,
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Tuple = VOCAB_FILES_NAMES
lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = BlenderbotSmallTokenizer
    def __init__( self , vocab_file=None , merges_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) -> Any:
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_a=None ) -> Any:  # token_ids_0 / token_ids_1 in the upstream API
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_a : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
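# Illustration (added, with hypothetical token ids): assuming bos_token_id=0 and
# eos_token_id=2, build_inputs_with_special_tokens([5, 6]) returns [0, 5, 6, 2],
# and for the pair ([5, 6], [7, 8]) it returns [0, 5, 6, 2, 2, 7, 8, 2]; the
# token_type_ids produced above are all zeros in both cases.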
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
__snake_case ={
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    def __init__( self , graph : dict[str, list[str]] , source_vertex : str ) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex
    def breath_first_search( self ) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex )
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex )
    def shortest_path( self , target_vertex : str ) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex )
        if target_vertex_parent is None:
            msg = (
                F'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
            )
            raise ValueError(msg )
        return self.shortest_path(target_vertex_parent ) + F'''->{target_vertex}'''
if __name__ == "__main__":
__snake_case =Graph(graph, """G""")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
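# Expected behaviour of the demo above (added note, derived from the adjacency
# map): with source vertex "G", shortest_path("D") prints "G->C->A->B->D",
# shortest_path("G") prints "G", and shortest_path("Foo") raises ValueError
# because "Foo" is not reachable from the source.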
| 4 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Union[str, Any] = '''speech_to_text_2'''
lowerCamelCase : Any = ['''past_key_values''']
lowerCamelCase : Optional[Any] = {'''num_attention_heads''': '''decoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self , vocab_size=1_0_0_0_0 , decoder_layers=6 , decoder_ffn_dim=2_0_4_8 , decoder_attention_heads=4 , decoder_layerdrop=0.0 , use_cache=True , activation_function="relu" , d_model=2_5_6 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_target_positions=1_0_2_4 , **kwargs , ) -> Dict:
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
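# Minimal sketch (added): the config builds with defaults and exposes the
# attribute_map aliases declared above; the values printed below follow the
# __init__ defaults and are illustrative only.
if __name__ == "__main__":
    cfg = UpperCAmelCase_()
    print(cfg.d_model, cfg.num_attention_heads)  # 256 4 (alias of decoder_attention_heads)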
| 4 | 1 |
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ ( __lowercase ):
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
lowerCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCAmelCase__ , 'embed_dim' ) )
self.parent.assertTrue(hasattr(UpperCAmelCase__ , 'num_heads' ) )
class UpperCAmelCase_ :
def __init__( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any]=1_3 , UpperCAmelCase__ : Dict=6_4 , UpperCAmelCase__ : Tuple=3 , UpperCAmelCase__ : Union[str, Any]=[1_6, 4_8, 9_6] , UpperCAmelCase__ : Dict=[1, 3, 6] , UpperCAmelCase__ : Optional[Any]=[1, 2, 1_0] , UpperCAmelCase__ : List[Any]=[7, 3, 3] , UpperCAmelCase__ : Union[str, Any]=[4, 2, 2] , UpperCAmelCase__ : List[Any]=[2, 1, 1] , UpperCAmelCase__ : Union[str, Any]=[2, 2, 2] , UpperCAmelCase__ : Dict=[False, False, True] , UpperCAmelCase__ : Any=[0.0, 0.0, 0.0] , UpperCAmelCase__ : str=0.02 , UpperCAmelCase__ : str=1E-12 , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Optional[Any]=2 , ) -> Tuple:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = image_size
lowerCAmelCase = patch_sizes
lowerCAmelCase = patch_stride
lowerCAmelCase = patch_padding
lowerCAmelCase = is_training
lowerCAmelCase = use_labels
lowerCAmelCase = num_labels
lowerCAmelCase = num_channels
lowerCAmelCase = embed_dim
lowerCAmelCase = num_heads
lowerCAmelCase = stride_kv
lowerCAmelCase = depth
lowerCAmelCase = cls_token
lowerCAmelCase = attention_drop_rate
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
def __UpperCAmelCase ( self : int ) -> List[Any]:
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self : str ) -> str:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] ) -> Dict:
lowerCAmelCase = CvtModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ )
lowerCAmelCase = (self.image_size, self.image_size)
lowerCAmelCase , lowerCAmelCase = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
lowerCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
lowerCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def __UpperCAmelCase ( self : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any ) -> List[str]:
lowerCAmelCase = self.num_labels
lowerCAmelCase = CvtForImageClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs
lowerCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
lowerCamelCase : Any = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
lowerCamelCase : str = (
{'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : Optional[Any] = False
lowerCamelCase : Dict = False
lowerCamelCase : List[str] = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Tuple = False
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
lowerCAmelCase = CvtModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=3_7 )
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
return
@unittest.skip(reason='Cvt does not output attentions' )
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def __UpperCAmelCase ( self : Tuple ) -> Any:
pass
def __UpperCAmelCase ( self : Any ) -> int:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(UpperCAmelCase__ )
lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Dict ) -> Dict:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
def check_hidden_states_output(UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any ):
lowerCAmelCase = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
lowerCAmelCase = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
lowerCAmelCase = outputs.hidden_states
lowerCAmelCase = len(self.model_tester.depth )
self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def __UpperCAmelCase ( self : int ) -> Dict:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
pass
@slow
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = CvtModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def a_ ( ):
lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
lowerCAmelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(UpperCAmelCase__ )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=UpperCAmelCase__ , return_tensors='pt' ).to(UpperCAmelCase__ )
# forward pass
with torch.no_grad():
lowerCAmelCase = model(**UpperCAmelCase__ )
# verify the logits
lowerCAmelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
lowerCAmelCase = torch.tensor([0.9_285, 0.9_015, -0.3_150] ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
| 4 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list ):
    def __lt__( self , other ) -> bool:
        return self[-1] < other[-1]
    def __eq__( self , other ) -> bool:
        return self[-1] == other[-1]
def patience_sort( collection : list ) -> list:
    stacks = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element] )
        i = bisect_left(stacks , new_stack )
        if i != len(stacks ):
            stacks[i].append(element )
        else:
            stacks.append(new_stack )
    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack ) for stack in stacks) )
    return collection
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(patience_sort(unsorted))
| 4 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : List[str] = '''naver-clova-ix/donut-base-finetuned-docvqa'''
lowerCamelCase : List[Any] = (
    '''This is a tool that answers a question about a document (pdf). It takes an input named `document` which '''
'''should be the document containing the information, as well as a `question` that is the question about the '''
'''document. It returns a text that contains the answer to the question.'''
)
lowerCamelCase : Union[str, Any] = '''document_qa'''
lowerCamelCase : List[str] = AutoProcessor
lowerCamelCase : List[str] = VisionEncoderDecoderModel
lowerCamelCase : Union[str, Any] = ['''image''', '''text''']
lowerCamelCase : Any = ['''text''']
    def __init__( self , *args , **kwargs ) -> Optional[Any]:
        if not is_vision_available():
            raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )
        super().__init__(*args , **kwargs )
    def encode( self , document : "Image" , question : str ) -> Dict:
        task_prompt = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        prompt = task_prompt.replace('{user_input}' , question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt , add_special_tokens=False , return_tensors='pt' ).input_ids
        pixel_values = self.pre_processor(document , return_tensors='pt' ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward( self , inputs ) -> int:
        return self.model.generate(
            inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=True , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=True , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=True , ).sequences
    def decode( self , outputs ) -> Dict:
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
        sequence = re.sub(R'<.*?>' , '' , sequence , count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.tokenajson(sequence )
        return sequence["answer"]
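# Usage sketch (added; downloads the naver-clova-ix/donut-base-finetuned-docvqa
# checkpoint, so it is left commented out; the file name is hypothetical):
# from PIL import Image
# tool = UpperCAmelCase_()
# print(tool(Image.open("invoice.png"), "What is the total amount?"))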
| 4 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__snake_case ="""\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__snake_case ="""\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account.
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__snake_case ="""
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
    def _info( self ) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute( self , predictions , references , max_order=4 , smooth=False ) -> int:
        score = compute_bleu(
            reference_corpus=references , translation_corpus=predictions , max_order=max_order , smooth=smooth )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
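# Reference note (added): compute_bleu reports the geometric mean of n-gram
# precisions scaled by the brevity penalty, BP = 1 when the translation is at
# least as long as the reference and exp(1 - reference_length / translation_length)
# otherwise; that factor is the 'brevity_penalty' field returned above.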
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def ucal( u : float , p : int ) -> float:
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
def main() -> None:
    n = int(input('enter the numbers of values: ' ) )
    y = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(j )
            y[i][j] = 0
    print('enter the values of parameters in a list: ' )
    x = list(map(int , input().split() ) )
    print('enter the values of corresponding parameters: ' )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input('enter the value to interpolate: ' ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(f'''the value at {value} is {summ}''' )
if __name__ == "__main__":
main()
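# Example session (added, illustrative inputs): with n=3, x=[0, 1, 2], the
# column y = [1, 3, 7] and value=1, u=(1-0)/(1-0)=1, the forward differences are
# [2, 4] and [2], and summ = 1 + ucal(1, 1)*2/1! + ucal(1, 2)*2/2! = 1 + 2 + 0 = 3.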
| 4 |
'''simple docstring'''
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__snake_case ="""\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
__snake_case ="""\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
__snake_case ="""
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy( preds , labels ):
    return float((preds == labels).mean() )
def acc_and_fa( preds , labels , fa_avg="binary" ):
    acc = simple_accuracy(preds , labels )
    fa = float(fa_score(y_true=labels , y_pred=preds , average=fa_avg ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def evaluate_multirc( ids_preds , labels ):
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = f'''{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}'''
        pred = id_pred['prediction']
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    fas , ems = [], []
    for question, preds_labels in question_map.items():
        question_preds , question_labels = zip(*preds_labels )
        fa = fa_score(y_true=question_labels , y_pred=question_preds , average='macro' )
        fas.append(fa )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    em = float(sum(ems ) / len(ems ) )
    fa_m = sum(fas ) / len(fas )
    fa_a = float(fa_score(y_true=labels , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
    def _info( self ) -> datasets.MetricInfo:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
    def _get_feature_types( self ) -> str:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
    def _compute( self , predictions , references ) -> Any:
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "cb":
            return acc_and_fa(predictions , references , fa_avg='macro' )
        elif self.config_name == "record":
            dataset = [
                {
                    'qas': [
                        {'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
            return evaluate_record(dataset , predictions )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions , references )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class UpperCAmelCase_ :
lowerCamelCase : Union[str, Any] = LEDConfig
lowerCamelCase : Tuple = {}
lowerCamelCase : Any = '''gelu'''
def __init__( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict=1_3 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Optional[int]=9_9 , UpperCAmelCase__ : Tuple=3_2 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : int=4 , UpperCAmelCase__ : str=3_7 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : Dict=2_0 , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : Dict=0 , UpperCAmelCase__ : str=4 , ) -> Optional[Any]:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = eos_token_id
lowerCAmelCase = pad_token_id
lowerCAmelCase = bos_token_id
lowerCAmelCase = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
lowerCAmelCase = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
lowerCAmelCase = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
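        # Worked example (added): with seq_length=7 and attention_window=4, the
        # expression above pads to 7 + (4 - 7 % 4) % 4 = 8, the next multiple of 4.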
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
lowerCAmelCase = prepare_led_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = tf.concat(
[tf.zeros_like(UpperCAmelCase__ )[:, :-1], tf.ones_like(UpperCAmelCase__ )[:, -1:]] , axis=-1 , )
lowerCAmelCase = global_attention_mask
return config, inputs_dict
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any] ) -> Optional[int]:
lowerCAmelCase = TFLEDModel(config=UpperCAmelCase__ ).get_decoder()
lowerCAmelCase = inputs_dict['input_ids']
lowerCAmelCase = input_ids[:1, :]
lowerCAmelCase = inputs_dict['attention_mask'][:1, :]
lowerCAmelCase = 1
# first forward pass
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
lowerCAmelCase , lowerCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowerCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0]
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
lowerCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1E-3 )
def prepare_led_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
lowerCamelCase : Optional[Any] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
lowerCamelCase : List[Any] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
lowerCamelCase : str = (
{
'''conversational''': TFLEDForConditionalGeneration,
'''feature-extraction''': TFLEDModel,
'''summarization''': TFLEDForConditionalGeneration,
'''text2text-generation''': TFLEDForConditionalGeneration,
'''translation''': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCamelCase : Dict = True
lowerCamelCase : List[Any] = False
lowerCamelCase : List[Any] = False
lowerCamelCase : List[str] = False
def __UpperCAmelCase ( self : Any ) -> Dict:
lowerCAmelCase = TFLEDModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase__ )
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : Dict ) -> Tuple:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = tf.zeros_like(inputs_dict['attention_mask'] )
lowerCAmelCase = 2
lowerCAmelCase = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
lowerCAmelCase = True
lowerCAmelCase = self.model_tester.seq_length
lowerCAmelCase = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(UpperCAmelCase__ : Any ):
lowerCAmelCase = outputs.decoder_attentions
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(UpperCAmelCase__ : Optional[Any] ):
lowerCAmelCase = [t.numpy() for t in outputs.encoder_attentions]
lowerCAmelCase = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
lowerCAmelCase = True
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = model_class(UpperCAmelCase__ )
lowerCAmelCase = model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
lowerCAmelCase = len(UpperCAmelCase__ )
self.assertEqual(config.output_hidden_states , UpperCAmelCase__ )
check_encoder_attentions_output(UpperCAmelCase__ )
if self.is_encoder_decoder:
lowerCAmelCase = model_class(UpperCAmelCase__ )
lowerCAmelCase = model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase__ )
check_decoder_attentions_output(UpperCAmelCase__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
lowerCAmelCase = True
lowerCAmelCase = model_class(UpperCAmelCase__ )
lowerCAmelCase = model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase__ )
check_encoder_attentions_output(UpperCAmelCase__ )
# Check attention is always last and order is fine
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = model_class(UpperCAmelCase__ )
lowerCAmelCase = model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase__ ) )
self.assertEqual(model.config.output_hidden_states , UpperCAmelCase__ )
check_encoder_attentions_output(UpperCAmelCase__ )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def __UpperCAmelCase ( self : int ) -> List[Any]:
pass
def __UpperCAmelCase ( self : int ) -> Any:
# TODO: Head-masking not yet implement
pass
def _long_tensor( tok_lst ):
    return tf.constant(tok_lst , dtype=tf.int32 )
__snake_case =1e-4
@slow
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
lowerCAmelCase = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
# change to intended input here
lowerCAmelCase = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
lowerCAmelCase = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
lowerCAmelCase = prepare_led_inputs_dict(model.config , UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = model(**UpperCAmelCase__ )[0]
lowerCAmelCase = (1, 1_0_2_4, 7_6_8)
self.assertEqual(output.shape , UpperCAmelCase__ )
# change to expected output here
lowerCAmelCase = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-3 )
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
lowerCAmelCase = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
# change to intended input here
lowerCAmelCase = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
lowerCAmelCase = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
lowerCAmelCase = prepare_led_inputs_dict(model.config , UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = model(**UpperCAmelCase__ )[0]
lowerCAmelCase = (1, 1_0_2_4, model.config.vocab_size)
self.assertEqual(output.shape , UpperCAmelCase__ )
# change to expected output here
lowerCAmelCase = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-3 , rtol=1E-3 )
| 4 |
'''simple docstring'''
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
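# How the quine works (added note): %r substitutes the repr of the format string
# into itself and %% collapses to a single %, so the program prints exactly its
# own source line.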
| 4 | 1 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case =logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : bool = field(default=__lowercase , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
lowerCamelCase : bool = field(
default=__lowercase , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
lowerCamelCase : Optional[int] = field(
default=__lowercase , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
lowerCamelCase : Optional[int] = field(
default=__lowercase , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
lowerCamelCase : Optional[Union[str, Path, GenerationConfig]] = field(
default=__lowercase , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
    def to_dict( self ) -> List[str]:
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig ):
                d[k] = v.to_dict()
        return d
| 4 |
'''simple docstring'''
import os
__snake_case ={"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1_000}
def parse_roman_numerals( numerals : str ) -> int:
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals( num : int ) -> str:
    numerals = ''
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def solution( roman_numerals_filename : str = "/p089_roman.txt" ) -> int:
    savings = 0
    with open(os.path.dirname(__file__ ) + roman_numerals_filename ) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original )
        shorter = generate_roman_numerals(num )
        savings += len(original ) - len(shorter )
    return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 4 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
def __init__( self : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int]=1_3 , UpperCAmelCase__ : Optional[Any]=3 , UpperCAmelCase__ : Optional[Any]=2_2_4 , UpperCAmelCase__ : Optional[Any]=3_0 , UpperCAmelCase__ : Any=4_0_0 , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , UpperCAmelCase__ : Optional[int]=[0.5, 0.5, 0.5] , ) -> int:
lowerCAmelCase = size if size is not None else {'height': 1_8, 'width': 1_8}
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = num_channels
lowerCAmelCase = image_size
lowerCAmelCase = min_resolution
lowerCAmelCase = max_resolution
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = do_normalize
lowerCAmelCase = image_mean
lowerCAmelCase = image_std
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
lowerCamelCase : Dict = ViTImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)
@property
def __UpperCAmelCase ( self : List[Any] ) -> str:
return self.image_proc_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase__ , 'image_mean' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'image_std' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase__ , 'size' ) )
def __UpperCAmelCase ( self : int ) -> Optional[int]:
pass
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
# Initialize image_processor
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image )
# Test not batched input
lowerCAmelCase = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
lowerCAmelCase = image_processor(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
def __UpperCAmelCase ( self : str ) -> Tuple:
# Initialize image_processor
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray )
# Test not batched input
lowerCAmelCase = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
lowerCAmelCase = image_processor(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
def __UpperCAmelCase ( self : List[Any] ) -> int:
# Initialize image_processor
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor )
# Test not batched input
lowerCAmelCase = image_processor(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
# Test batched
lowerCAmelCase = image_processor(UpperCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['height'],
self.image_proc_tester.size['width'],
) , )
| 4 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__snake_case =random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as nested Python lists."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
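# e.g. floats_list((2, 3)) returns two lists of three uniform floats in [0.0, 1.0)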
class ASTFeatureExtractionTester(unittest.TestCase):
def __init__( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str]=7 , UpperCAmelCase__ : int=4_0_0 , UpperCAmelCase__ : int=2_0_0_0 , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple=1_6_0_0_0 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Union[str, Any]=True , ) -> Any:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = min_seq_length
lowerCAmelCase = max_seq_length
lowerCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
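        # step between consecutive sequence lengths, so the batch spans [min_seq_length, max_seq_length]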
lowerCAmelCase = feature_size
lowerCAmelCase = padding_value
lowerCAmelCase = sampling_rate
lowerCAmelCase = return_attention_mask
lowerCAmelCase = do_normalize
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Union[str, Any]=False ) -> Optional[Any]:
def _flatten(UpperCAmelCase__ : int ):
return list(itertools.chain(*UpperCAmelCase__ ) )
if equal_length:
lowerCAmelCase = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCAmelCase = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase = [np.asarray(UpperCAmelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
lowerCamelCase : Dict = ASTFeatureExtractor
def __UpperCAmelCase ( self : str ) -> Optional[int]:
        self.feat_extract_tester = ASTFeatureExtractionTester(self)
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase = [np.asarray(UpperCAmelCase__ ) for speech_input in speech_inputs]
# Test not batched input
lowerCAmelCase = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test batched
lowerCAmelCase = feat_extract(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
lowerCAmelCase = np.asarray(UpperCAmelCase__ )
lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
import torch
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = np.random.rand(1_0_0 ).astype(np.floataa )
lowerCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : str ) -> Tuple:
from datasets import load_dataset
lowerCAmelCase = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
lowerCAmelCase = ds.sort('id' ).select(range(UpperCAmelCase__ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
@require_torch
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
# fmt: off
lowerCAmelCase = torch.tensor(
[-0.9_894, -1.2_776, -0.9_066, -1.2_776, -0.9_349, -1.2_609, -1.0_386, -1.2_776,
-1.1_561, -1.2_776, -1.2_052, -1.2_723, -1.2_190, -1.2_132, -1.2_776, -1.1_133,
-1.1_953, -1.1_343, -1.1_584, -1.2_203, -1.1_770, -1.2_474, -1.2_381, -1.1_936,
-0.9_270, -0.8_317, -0.8_049, -0.7_706, -0.7_565, -0.7_869] )
# fmt: on
lowerCAmelCase = self._load_datasamples(1 )
lowerCAmelCase = ASTFeatureExtractor()
lowerCAmelCase = feature_extractor(UpperCAmelCase__ , return_tensors='pt' ).input_values
        self.assertEqual(input_values.shape , (1, 1_0_2_4, 1_2_8) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , UpperCAmelCase__ , atol=1E-4 ) )
| 4 | 1 |
'''simple docstring'''
def gnome_sort(lst: list) -> list:
    # Gnome sort: walk forward while the pair is in order, swap and step back otherwise.
    # e.g. gnome_sort([34, 2, 10, -9]) -> [-9, 2, 10, 34]
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i], lst[i - 1] = lst[i - 1], lst[i]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
| 4 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : str ) -> List[Any]:
lowerCAmelCase = torch.nn.Linear(1_0 , 1_0 )
lowerCAmelCase = torch.optim.SGD(model.parameters() , 0.1 )
lowerCAmelCase = Accelerator()
lowerCAmelCase = accelerator.prepare(UpperCAmelCase__ )
try:
pickle.loads(pickle.dumps(UpperCAmelCase__ ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 4 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__snake_case =logging.get_logger(__name__)
__snake_case ={"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__snake_case ={
"""vocab_file""": {"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"""},
"""tokenizer_file""": {
"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"""
},
}
__snake_case ={"""mobilebert-uncased""": 512}
__snake_case ={}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Tuple = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Union[str, Any] = MobileBertTokenizer
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Optional[int]="[UNK]" , UpperCAmelCase__ : Optional[Any]="[SEP]" , UpperCAmelCase__ : List[Any]="[PAD]" , UpperCAmelCase__ : int="[CLS]" , UpperCAmelCase__ : str="[MASK]" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Any=None , **UpperCAmelCase__ : List[str] , ) -> str:
super().__init__(
UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , tokenize_chinese_chars=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ , **UpperCAmelCase__ , )
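        # Re-sync the serialized normalizer with the kwargs passed at init time; rebuild it if they disagree.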
lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCAmelCase__ ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCAmelCase__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCAmelCase__ ) != tokenize_chinese_chars
):
lowerCAmelCase = getattr(UpperCAmelCase__ , normalizer_state.pop('type' ) )
lowerCAmelCase = do_lower_case
lowerCAmelCase = strip_accents
lowerCAmelCase = tokenize_chinese_chars
lowerCAmelCase = normalizer_class(**UpperCAmelCase__ )
lowerCAmelCase = do_lower_case
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int]=None ) -> Optional[int]:
lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
lowerCAmelCase = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
| 4 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
__snake_case ={
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
__snake_case ={"""facebook/blenderbot-3B""": 128}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : List[Any] = VOCAB_FILES_NAMES
lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = ['''input_ids''', '''attention_mask''']
lowerCamelCase : List[Any] = BlenderbotTokenizer
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : str="replace" , UpperCAmelCase__ : Dict="<s>" , UpperCAmelCase__ : Tuple="</s>" , UpperCAmelCase__ : Optional[Any]="</s>" , UpperCAmelCase__ : Any="<s>" , UpperCAmelCase__ : List[str]="<unk>" , UpperCAmelCase__ : int="<pad>" , UpperCAmelCase__ : Union[str, Any]="<mask>" , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Union[str, Any]=True , **UpperCAmelCase__ : Optional[int] , ) -> int:
super().__init__(
UpperCAmelCase__ , UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , errors=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ , **UpperCAmelCase__ , )
lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , UpperCAmelCase__ ) != add_prefix_space:
lowerCAmelCase = getattr(UpperCAmelCase__ , pre_tok_state.pop('type' ) )
lowerCAmelCase = add_prefix_space
lowerCAmelCase = pre_tok_class(**UpperCAmelCase__ )
lowerCAmelCase = add_prefix_space
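        # The serialized post-processor may also disagree with `add_prefix_space`/`trim_offsets`;
        # patch its state and rebuild it below if so.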
lowerCAmelCase = 'post_processor'
lowerCAmelCase = getattr(self.backend_tokenizer , UpperCAmelCase__ , UpperCAmelCase__ )
if tokenizer_component_instance:
lowerCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase = tuple(state['sep'] )
if "cls" in state:
lowerCAmelCase = tuple(state['cls'] )
lowerCAmelCase = False
if state.get('add_prefix_space' , UpperCAmelCase__ ) != add_prefix_space:
lowerCAmelCase = add_prefix_space
lowerCAmelCase = True
if state.get('trim_offsets' , UpperCAmelCase__ ) != trim_offsets:
lowerCAmelCase = trim_offsets
lowerCAmelCase = True
if changes_to_apply:
lowerCAmelCase = getattr(UpperCAmelCase__ , state.pop('type' ) )
lowerCAmelCase = component_class(**UpperCAmelCase__ )
setattr(self.backend_tokenizer , UpperCAmelCase__ , UpperCAmelCase__ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : Optional[Any] ) -> Tuple:
lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else value
lowerCAmelCase = value
def __UpperCAmelCase ( self : Optional[Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : List[str] ) -> BatchEncoding:
lowerCAmelCase = kwargs.get('is_split_into_words' , UpperCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[str] , *UpperCAmelCase__ : str , **UpperCAmelCase__ : List[str] ) -> BatchEncoding:
lowerCAmelCase = kwargs.get('is_split_into_words' , UpperCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
lowerCAmelCase = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> Any:
return token_ids_a + [self.eos_token_id]
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : "Conversation" ) -> List[int]:
lowerCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix a space, as is done inside Blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(UpperCAmelCase__ )
lowerCAmelCase = ' '.join(UpperCAmelCase__ )
lowerCAmelCase = self.encode(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > self.model_max_length:
lowerCAmelCase = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 4 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
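# Placeholder ("dummy") classes: they mirror the real Flax pipeline classes so that
# referencing them without the optional `flax`/`transformers` backends raises a
# clear error from `requires_backends` instead of an opaque ImportError.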
class UpperCAmelCase_ ( metaclass=__lowercase ):
lowerCamelCase : Tuple = ['''flax''', '''transformers''']
def __init__( self : Optional[Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Dict ) -> Dict:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __UpperCAmelCase ( cls : Dict , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Optional[Any] ) -> str:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __UpperCAmelCase ( cls : Optional[Any] , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['flax', 'transformers'] )
class UpperCAmelCase_ ( metaclass=__lowercase ):
lowerCamelCase : int = ['''flax''', '''transformers''']
def __init__( self : Tuple , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : int ) -> List[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __UpperCAmelCase ( cls : Dict , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Tuple ) -> List[str]:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __UpperCAmelCase ( cls : List[str] , *UpperCAmelCase__ : str , **UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls , ['flax', 'transformers'] )
class UpperCAmelCase_ ( metaclass=__lowercase ):
lowerCamelCase : List[Any] = ['''flax''', '''transformers''']
def __init__( self : str , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Optional[int] ) -> Tuple:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __UpperCAmelCase ( cls : List[str] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : List[Any] ) -> List[str]:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __UpperCAmelCase ( cls : str , *UpperCAmelCase__ : int , **UpperCAmelCase__ : int ) -> List[str]:
requires_backends(cls , ['flax', 'transformers'] )
class UpperCAmelCase_ ( metaclass=__lowercase ):
lowerCamelCase : Any = ['''flax''', '''transformers''']
def __init__( self : Any , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Any ) -> Optional[int]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __UpperCAmelCase ( cls : int , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : int ) -> str:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __UpperCAmelCase ( cls : Tuple , *UpperCAmelCase__ : str , **UpperCAmelCase__ : Dict ) -> Optional[Any]:
requires_backends(cls , ['flax', 'transformers'] )
| 4 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to the full burst time of each process.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes remain uncompleted: a process whose arrival time has passed
    # and which still has remaining execution time is put into ready_process, and
    # the shortest job in ready_process (target_process) is executed next.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
| 4 | 1 |
'''simple docstring'''
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for the sum of an arithmetic series: n / 2 * (2a + (n - 1) * d)
    return total
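# e.g. sum_of_series(1, 1, 10) == 55.0, the arithmetic series 1 + 2 + ... + 10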
def a_ ( ):
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : Optional[int] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Tuple ) -> Any:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_euler' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self : List[str] ) -> Dict:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_euler' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_dpmpp_2m' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=1_5 , output_type='np' , use_karras_sigmas=UpperCAmelCase__ , )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array(
[0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 4 | 1 |
'''simple docstring'''
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    # Lists in gen_kwargs are the data sources we can shard over; they must all agree in length.
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    # e.g. _distribute_shards(num_shards=5, max_num_jobs=2) -> [range(0, 3), range(3, 5)]
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    # Lists of the same size get the same shuffling, so entangled sources stay aligned.
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
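# Illustrative example (hypothetical gen_kwargs, not taken from the library's test suite):
# _split_gen_kwargs({"files": ["a", "b", "c"], "rank": 0}, max_num_jobs=2)
# -> [{"files": ["a", "b"], "rank": 0}, {"files": ["c"], "rank": 0}]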
| 4 |
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)
    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
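# Illustrative shape walk-through (assumed toy sizes, not from a real checkpoint):
# for checkpoint_version 1.0 with num_splits=3, num_heads=2, hidden_size=4, a
# [3 * 2 * 4, D] tensor is viewed as [2, 4, 3, D] and transposed in two steps to
# [3, 2, 4, D] (splits, heads, hidden), then flattened back to [3 * 2 * 4, D].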
def convert_megatron_checkpoint(args, input_state_dict, config):
# The converted output model.
lowerCAmelCase = {}
# old versions did not store training args
lowerCAmelCase = input_state_dict.get('args' , lowerCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
lowerCAmelCase = ds_args.padded_vocab_size
lowerCAmelCase = ds_args.max_position_embeddings
lowerCAmelCase = ds_args.hidden_size
lowerCAmelCase = ds_args.num_layers
lowerCAmelCase = ds_args.num_attention_heads
lowerCAmelCase = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
lowerCAmelCase = config.n_head
# The hidden_size per head.
lowerCAmelCase = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
lowerCAmelCase = input_state_dict['checkpoint_version']
else:
lowerCAmelCase = 0.0
# The model.
lowerCAmelCase = input_state_dict['model']
# The language model.
lowerCAmelCase = model['language_model']
# The embeddings.
lowerCAmelCase = lm['embedding']
# The word embeddings.
lowerCAmelCase = embeddings['word_embeddings']['weight']
# Truncate the embedding table to vocab_size rows.
lowerCAmelCase = word_embeddings[: config.vocab_size, :]
lowerCAmelCase = word_embeddings
# The position embeddings.
lowerCAmelCase = embeddings['position_embeddings']['weight']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
lowerCAmelCase = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
lowerCAmelCase = pos_embeddings
# The transformer.
lowerCAmelCase = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder']
# The regex to extract layer names.
lowerCAmelCase = re.compile(R'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' )
# The simple map of names for "automated" rules.
lowerCAmelCase = {
'attention.dense': '.attn.c_proj.',
'self_attention.dense': '.attn.c_proj.',
'mlp.dense_h_to_4h': '.mlp.c_fc.',
'mlp.dense_4h_to_h': '.mlp.c_proj.',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
lowerCAmelCase = layer_re.match(lowerCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
lowerCAmelCase = int(m.group(1 ) )
# The name of the operation.
lowerCAmelCase = m.group(2 )
# Is it a weight or a bias?
lowerCAmelCase = m.group(3 )
# The name of the layer.
lowerCAmelCase = f'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith('layernorm' ):
lowerCAmelCase = 'ln_1' if op_name.startswith('input' ) else 'ln_2'
lowerCAmelCase = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
lowerCAmelCase = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , lowerCamelCase , lowerCamelCase )
lowerCAmelCase = causal_mask
# Insert a "dummy" tensor for masked_bias.
lowerCAmelCase = torch.tensor(-1e4 , dtype=torch.floataa )
lowerCAmelCase = masked_bias
lowerCAmelCase = fix_query_key_value_ordering(lowerCamelCase , lowerCamelCase , 3 , lowerCamelCase , lowerCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
lowerCAmelCase = out_val.transpose(0 , 1 ).contiguous()
# Store.
lowerCAmelCase = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
lowerCAmelCase = fix_query_key_value_ordering(lowerCamelCase , lowerCamelCase , 3 , lowerCamelCase , lowerCamelCase )
# Store. No change of shape.
lowerCAmelCase = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
lowerCAmelCase = megatron_to_transformers[op_name]
lowerCAmelCase = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
lowerCAmelCase = megatron_to_transformers[op_name]
lowerCAmelCase = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
lowerCAmelCase = transformer['final_layernorm.weight']
lowerCAmelCase = transformer['final_layernorm.bias']
    # For the LM head, transformers wants the weight matrix tied to the word embeddings.
lowerCAmelCase = word_embeddings
# It should be done!
return output_state_dict
def main():
# Create the argument parser.
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('--print-checkpoint-structure' , action='store_true' )
parser.add_argument(
'path_to_checkpoint' , type=lowerCamelCase , help='Path to the checkpoint file (.zip archive or direct .pt file)' , )
parser.add_argument(
'--config_file' , default='' , type=lowerCamelCase , help='An optional config json file describing the pre-trained model.' , )
lowerCAmelCase = parser.parse_args()
# Extract the basename.
lowerCAmelCase = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith('.zip' ):
with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint:
with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict:
lowerCAmelCase = torch.load(lowerCamelCase , map_location='cpu' )
else:
lowerCAmelCase = torch.load(args.path_to_checkpoint , map_location='cpu' )
lowerCAmelCase = input_state_dict.get('args' , lowerCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
lowerCAmelCase = 'gelu_fast'
elif ds_args.openai_gelu:
lowerCAmelCase = 'gelu_new'
else:
lowerCAmelCase = 'gelu'
else:
# in the very early days this used to be "gelu_new"
lowerCAmelCase = 'gelu_new'
# Spell out all parameters in case the defaults change.
lowerCAmelCase = GPTaConfig(
vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=lowerCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='cls_index' , summary_use_proj=lowerCamelCase , summary_activation=lowerCamelCase , summary_proj_to_labels=lowerCamelCase , summary_first_dropout=0.1 , scale_attn_weights=lowerCamelCase , use_cache=lowerCamelCase , bos_token_id=50256 , eos_token_id=50256 , )
else:
lowerCAmelCase = GPTaConfig.from_json_file(args.config_file )
lowerCAmelCase = ['GPT2LMHeadModel']
# Convert.
print('Converting' )
lowerCAmelCase = convert_megatron_checkpoint(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(lowerCamelCase , lowerCamelCase )
# Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
lowerCAmelCase = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
lowerCAmelCase = 'gpt2'
elif tokenizer_type == "PretrainedFromHF":
lowerCAmelCase = ds_args.tokenizer_name_or_path
else:
raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
lowerCAmelCase = 'gpt2'
lowerCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase )
lowerCAmelCase = type(lowerCamelCase ).__name__
lowerCAmelCase = tokenizer_class
# Store the config to file.
print('Saving config' )
config.save_pretrained(lowerCamelCase )
# Save tokenizer based on args
print(f'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(lowerCamelCase )
# Store the state_dict to file.
lowerCAmelCase = os.path.join(lowerCamelCase , 'pytorch_model.bin' )
print(f'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(lowerCamelCase , lowerCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to the full burst time of each process.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes remain uncompleted: a process whose arrival time has passed
    # and which still has remaining execution time is put into ready_process, and
    # the shortest job in ready_process (target_process) is executed next.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
| 4 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consists of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indices(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        # Sherman-Morrison formula, treating `self` as A^(-1):
        # (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # it's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def testa() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def a_() -> None:
        import doctest

        doctest.testmod()

    testa()
| 4 | 1 |
'''simple docstring'''
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
__snake_case =True
from torch.cuda.amp import autocast
__snake_case =logging.getLogger(__name__)
@dataclass
class ModelArguments:
lowerCamelCase : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowerCamelCase : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
lowerCamelCase : Optional[bool] = field(
default=__lowercase , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
lowerCamelCase : Optional[bool] = field(
default=__lowercase , metadata={'''help''': '''Whether to log verbose messages or not.'''} , )
lowerCamelCase : Optional[float] = field(
default=2.0 , metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} )
lowerCamelCase : Optional[float] = field(
default=0.5 , metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} )
lowerCamelCase : Optional[float] = field(
default=0.9_9_9_9_9_5 , metadata={'''help''': '''Decay of gumbel temperature during training.'''} )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
lowerCamelCase : str = field(
default=__lowercase , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
lowerCamelCase : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
lowerCamelCase : Optional[str] = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
lowerCamelCase : Optional[str] = field(
default='''validation''' , metadata={
'''help''': (
'''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
lowerCamelCase : Optional[str] = field(
default='''file''' , metadata={'''help''': '''Column in the dataset that contains speech file path. Defaults to \'file\''''} , )
lowerCamelCase : bool = field(
default=__lowercase , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
lowerCamelCase : Optional[int] = field(
default=1 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
lowerCamelCase : Optional[int] = field(
default=__lowercase , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
lowerCamelCase : Optional[float] = field(
default=2_0.0 , metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} )
@dataclass
class UpperCAmelCase_ :
lowerCamelCase : WavaVecaForPreTraining
lowerCamelCase : WavaVecaFeatureExtractor
lowerCamelCase : Union[bool, str] = "longest"
lowerCamelCase : Optional[int] = None
lowerCamelCase : Optional[int] = None
def __call__( self : Union[str, Any] , UpperCAmelCase__ : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
# reformat list to dict and set to pytorch format
lowerCAmelCase = self.feature_extractor.pad(
UpperCAmelCase__ , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
lowerCAmelCase = self.model._get_feat_extract_output_lengths(batch['input_values'].shape[-1] )
lowerCAmelCase = batch['input_values'].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
lowerCAmelCase = self.model._get_feat_extract_output_lengths(batch['attention_mask'].sum(-1 ) ).to(
torch.long )
lowerCAmelCase = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['input_values'].device )
# these two operations makes sure that all values
# before the output lengths indices are attended to
lowerCAmelCase = 1
lowerCAmelCase = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
lowerCAmelCase = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=UpperCAmelCase__ , min_masks=2 , )
return batch
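# Illustrative sketch (not part of the original collator; tensor values are
# hypothetical): the flip/cumsum/flip trick above turns a tensor that marks
# only the last valid position of each sequence into a full boolean prefix
# attention mask.
import torch

_marks = torch.tensor([[0, 0, 1, 0], [0, 1, 0, 0]])  # 1 at index (output_length - 1)
_mask = _marks.flip([-1]).cumsum(-1).flip([-1]).bool()
assert _mask.tolist() == [[True, True, True, False], [True, True, False, False]]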
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : Optional[int] , *UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int]=1 , UpperCAmelCase__ : Dict=0 , UpperCAmelCase__ : List[str]=1.0 , **UpperCAmelCase__ : int ) -> int:
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = 0
lowerCAmelCase = max_gumbel_temp
lowerCAmelCase = min_gumbel_temp
lowerCAmelCase = gumbel_temp_decay
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : nn.Module , UpperCAmelCase__ : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
model.train()
lowerCAmelCase = self._prepare_inputs(UpperCAmelCase__ )
if self.use_amp:
with autocast():
lowerCAmelCase = self.compute_loss(UpperCAmelCase__ , UpperCAmelCase__ )
else:
lowerCAmelCase = self.compute_loss(UpperCAmelCase__ , UpperCAmelCase__ )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
lowerCAmelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
lowerCAmelCase = loss.sum() / (inputs['mask_time_indices']).sum()
else:
                raise ValueError(F'''{model.module.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
if self.args.gradient_accumulation_steps > 1:
lowerCAmelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(UpperCAmelCase__ ).backward()
elif self.use_apex:
with amp.scale_loss(UpperCAmelCase__ , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(UpperCAmelCase__ )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
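# Illustrative sketch (clean names are mine, not the trainer's): the gumbel
# softmax temperature decays exponentially per update step and is clamped at
# a floor, i.e. temp(step) = max(max_temp * decay**step, min_temp).
def _gumbel_temperature(step: int, max_temp: float = 2.0, min_temp: float = 0.5, decay: float = 0.999995) -> float:
    return max(max_temp * decay**step, min_temp)

assert _gumbel_temperature(0) == 2.0      # starts at the maximum
assert _gumbel_temperature(10**9) == 0.5  # eventually clamped at the minimum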
def a_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = parser.parse_args_into_dataclasses()
configure_logger(lowerCamelCase , lowerCamelCase )
# Downloading and loading a dataset from the hub.
lowerCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
lowerCAmelCase = DatasetDict()
lowerCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , )
lowerCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
lowerCAmelCase = DatasetDict()
lowerCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split='validation' , cache_dir=model_args.cache_dir , )
lowerCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=lowerCamelCase )
def prepare_dataset(lowerCamelCase : Optional[Any] ):
# check that all files have the correct sampling rate
lowerCAmelCase , lowerCAmelCase = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
lowerCAmelCase = datasets.map(
lowerCamelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['train'].column_names )
# filter audio files that are too long
lowerCAmelCase = vectorized_datasets.filter(
lambda lowerCamelCase : len(data['speech'] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(lowerCamelCase : Dict ):
return feature_extractor(batch['speech'] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
lowerCAmelCase = vectorized_datasets.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['train'].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
lowerCAmelCase = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
'PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'
            ' ``config.feat_extract_norm=\'layer\'``' )
lowerCAmelCase = WavaVecaForPreTraining(lowerCamelCase )
lowerCAmelCase = DataCollatorForWavaVecaPretraining(model=lowerCamelCase , feature_extractor=lowerCamelCase )
lowerCAmelCase = WavaVecaPreTrainer(
model=lowerCamelCase , data_collator=lowerCamelCase , args=lowerCamelCase , train_dataset=vectorized_datasets['train'] , eval_dataset=vectorized_datasets['validation'] , tokenizer=lowerCamelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 4 |
'''simple docstring'''
class UpperCAmelCase_ :
def __init__( self : List[str] , UpperCAmelCase__ : list[int] ) -> None:
lowerCAmelCase = len(UpperCAmelCase__ )
lowerCAmelCase = [0] * len_array
if len_array > 0:
lowerCAmelCase = array[0]
for i in range(1 , UpperCAmelCase__ ):
lowerCAmelCase = self.prefix_sum[i - 1] + array[i]
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : int ) -> bool:
lowerCAmelCase = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(UpperCAmelCase__ )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
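# Illustrative sketch of the prefix-sum idea above (clean names are mine, not
# the class's): a range sum over [start, end] is prefix[end] - prefix[start - 1]
# after a single O(n) accumulation pass.
from itertools import accumulate

_prefix = list(accumulate([1, 2, 3, 4]))  # [1, 3, 6, 10]
assert _prefix[3] - _prefix[0] == 9       # sum of elements at indices 1..3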
| 4 | 1 |
'''simple docstring'''
import os
def a_ ( ):
lowerCAmelCase = os.path.join(os.path.dirname(lowerCamelCase ) , 'num.txt' )
with open(lowerCamelCase ) as file_hand:
return str(sum(int(lowerCamelCase ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
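# Illustrative sketch (hypothetical numbers, not the Project Euler input): the
# solution above relies on Python's arbitrary-precision integers, so the first
# ten digits of the total are simply str(total)[:10].
assert str(sum([10**12 + 7, 3 * 10**12]))[:10] == "4000000000"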
| 4 |
'''simple docstring'''
def a_ ( lowerCamelCase : Optional[Any] ):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def a_ ( lowerCamelCase : dict[int, list[int]] ):
lowerCAmelCase = 0
lowerCAmelCase = len(lowerCamelCase ) # No of vertices in graph
lowerCAmelCase = [0] * n
lowerCAmelCase = [False] * n
def dfs(lowerCamelCase : Tuple , lowerCamelCase : str , lowerCamelCase : Dict , lowerCamelCase : str ):
lowerCAmelCase = True
lowerCAmelCase = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(lowerCamelCase , lowerCamelCase , lowerCamelCase , id_ )
lowerCAmelCase = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowerCAmelCase = min(low[at] , low[to] )
lowerCAmelCase = []
for i in range(lowerCamelCase ):
if not visited[i]:
dfs(lowerCamelCase , -1 , lowerCamelCase , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
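# Illustrative cross-check (clean names and helper are mine): an edge is a
# bridge iff removing it disconnects its endpoints, which gives a brute-force
# oracle for the DFS low-link computation above.
def _is_bridge(graph: dict, u: int, v: int) -> bool:
    seen, stack = {u}, [u]
    while stack:
        node = stack.pop()
        for nxt in graph[node]:
            if (node, nxt) in ((u, v), (v, u)) or nxt in seen:
                continue
            seen.add(nxt)
            stack.append(nxt)
    return v not in seen

_demo = {0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3],
         5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7]}
assert _is_bridge(_demo, 2, 3) and _is_bridge(_demo, 3, 4) and not _is_bridge(_demo, 5, 6)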
| 4 | 1 |
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def a_ ( lowerCamelCase : Dict ):
lowerCAmelCase = {}
lowerCAmelCase = tokenizer(example['content'] , truncation=lowerCamelCase )['input_ids']
lowerCAmelCase = len(example['content'] ) / len(output['input_ids'] )
return output
__snake_case =HfArgumentParser(PretokenizationArguments)
__snake_case =parser.parse_args()
if args.num_workers is None:
__snake_case =multiprocessing.cpu_count()
__snake_case =AutoTokenizer.from_pretrained(args.tokenizer_dir)
__snake_case =time.time()
__snake_case =load_dataset(args.dataset_name, split="""train""")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
__snake_case =time.time()
__snake_case =ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
__snake_case =time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 4 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case =logging.get_logger(__name__)
def a_ ( lowerCamelCase : Any ):
lowerCAmelCase = OrderedDict()
for key, value in state_dict.items():
if key.startswith('module.encoder' ):
lowerCAmelCase = key.replace('module.encoder' , 'glpn.encoder' )
if key.startswith('module.decoder' ):
lowerCAmelCase = key.replace('module.decoder' , 'decoder.stages' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase = key[key.find('patch_embed' ) + len('patch_embed' )]
lowerCAmelCase = key.replace(f'''patch_embed{idx}''' , f'''patch_embeddings.{int(lowerCamelCase )-1}''' )
if "norm" in key:
lowerCAmelCase = key.replace('norm' , 'layer_norm' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
lowerCAmelCase = key.replace(f'''layer_norm{idx}''' , f'''layer_norm.{int(lowerCamelCase )-1}''' )
if "layer_norm1" in key:
lowerCAmelCase = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
lowerCAmelCase = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase = key[key.find('block' ) + len('block' )]
lowerCAmelCase = key.replace(f'''block{idx}''' , f'''block.{int(lowerCamelCase )-1}''' )
if "attn.q" in key:
lowerCAmelCase = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
lowerCAmelCase = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
lowerCAmelCase = key.replace('attn' , 'attention.self' )
if "fc1" in key:
lowerCAmelCase = key.replace('fc1' , 'dense1' )
if "fc2" in key:
lowerCAmelCase = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
lowerCAmelCase = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
lowerCAmelCase = key.replace('linear_fuse.conv' , 'linear_fuse' )
lowerCAmelCase = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase = key[key.find('linear_c' ) + len('linear_c' )]
lowerCAmelCase = key.replace(f'''linear_c{idx}''' , f'''linear_c.{int(lowerCamelCase )-1}''' )
if "bot_conv" in key:
lowerCAmelCase = key.replace('bot_conv' , '0.convolution' )
if "skip_conv1" in key:
lowerCAmelCase = key.replace('skip_conv1' , '1.convolution' )
if "skip_conv2" in key:
lowerCAmelCase = key.replace('skip_conv2' , '2.convolution' )
if "fusion1" in key:
lowerCAmelCase = key.replace('fusion1' , '1.fusion' )
if "fusion2" in key:
lowerCAmelCase = key.replace('fusion2' , '2.fusion' )
if "fusion3" in key:
lowerCAmelCase = key.replace('fusion3' , '3.fusion' )
if "fusion" in key and "conv" in key:
lowerCAmelCase = key.replace('conv' , 'convolutional_layer' )
if key.startswith('module.last_layer_depth' ):
lowerCAmelCase = key.replace('module.last_layer_depth' , 'head.head' )
lowerCAmelCase = value
return new_state_dict
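# Illustrative sketch (hypothetical checkpoint key): the renaming above shifts
# the 1-based index in the original name to the 0-based HF name, e.g.
# "module.encoder.patch_embed1.proj.weight" -> "glpn.encoder.patch_embeddings.0.proj.weight".
_key = "module.encoder.patch_embed1.proj.weight".replace("module.encoder", "glpn.encoder")
_idx = _key[_key.find("patch_embed") + len("patch_embed")]
_key = _key.replace(f"patch_embed{_idx}", f"patch_embeddings.{int(_idx) - 1}")
assert _key == "glpn.encoder.patch_embeddings.0.proj.weight"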
def a_ ( lowerCamelCase : List[str] , lowerCamelCase : str ):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
lowerCAmelCase = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase = kv_bias[config.hidden_sizes[i] :]
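# Illustrative sketch (shapes are hypothetical): the fused key/value matrix of
# shape (2 * hidden, hidden) is split row-wise — the first `hidden` rows feed
# the key projection and the remaining rows feed the value projection.
import torch

_hidden = 4
_kv_weight = torch.randn(2 * _hidden, _hidden)
_key_w, _value_w = _kv_weight[:_hidden, :], _kv_weight[_hidden:, :]
assert _key_w.shape == _value_w.shape == (_hidden, _hidden)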
def a_ ( ):
lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return image
@torch.no_grad()
def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any]=False , lowerCamelCase : List[str]=None ):
lowerCAmelCase = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
lowerCAmelCase = GLPNImageProcessor()
# prepare image
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=lowerCamelCase , return_tensors='pt' ).pixel_values
logger.info('Converting model...' )
# load original state dict
lowerCAmelCase = torch.load(lowerCamelCase , map_location=torch.device('cpu' ) )
# rename keys
lowerCAmelCase = rename_keys(lowerCamelCase )
# key and value matrices need special treatment
read_in_k_v(lowerCamelCase , lowerCamelCase )
# create HuggingFace model and load state dict
lowerCAmelCase = GLPNForDepthEstimation(lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
# forward pass
lowerCAmelCase = model(lowerCamelCase )
lowerCAmelCase = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
lowerCAmelCase = torch.tensor(
[[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
elif "kitti" in model_name:
lowerCAmelCase = torch.tensor(
[[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
else:
raise ValueError(f'''Unknown model name: {model_name}''' )
lowerCAmelCase = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , lowerCamelCase , atol=1e-4 )
print('Looks ok!' )
# finally, push to hub if required
if push_to_hub:
logger.info('Pushing model and image processor to the hub...' )
model.push_to_hub(
repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCamelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase , )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
__snake_case =parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 4 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class UpperCAmelCase_ ( __lowercase , __lowercase ):
lowerCamelCase : List[Any] = '''bit'''
lowerCamelCase : Any = ['''preactivation''', '''bottleneck''']
lowerCamelCase : List[Any] = ['''SAME''', '''VALID''']
def __init__( self : List[str] , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : Any=6_4 , UpperCAmelCase__ : Any=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , UpperCAmelCase__ : int=[3, 4, 6, 3] , UpperCAmelCase__ : int="preactivation" , UpperCAmelCase__ : str="relu" , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Optional[Any]=3_2 , UpperCAmelCase__ : Optional[int]=0.0 , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Optional[int]=3_2 , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Union[str, Any]=None , **UpperCAmelCase__ : str , ) -> int:
super().__init__(**UpperCAmelCase__ )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
lowerCAmelCase = global_padding.upper()
else:
raise ValueError(F'''Padding strategy {global_padding} not supported''' )
lowerCAmelCase = num_channels
lowerCAmelCase = embedding_size
lowerCAmelCase = hidden_sizes
lowerCAmelCase = depths
lowerCAmelCase = layer_type
lowerCAmelCase = hidden_act
lowerCAmelCase = global_padding
lowerCAmelCase = num_groups
lowerCAmelCase = drop_path_rate
lowerCAmelCase = embedding_dynamic_padding
lowerCAmelCase = output_stride
lowerCAmelCase = width_factor
lowerCAmelCase = ['stem'] + [F'''stage{idx}''' for idx in range(1 , len(UpperCAmelCase__ ) + 1 )]
lowerCAmelCase , lowerCAmelCase = get_aligned_output_features_output_indices(
out_features=UpperCAmelCase__ , out_indices=UpperCAmelCase__ , stage_names=self.stage_names )
| 4 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self : str ) -> List[str]:
lowerCAmelCase = XLMRobertaModel.from_pretrained('xlm-roberta-base' )
lowerCAmelCase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
lowerCAmelCase = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim
lowerCAmelCase = torch.tensor(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
lowerCAmelCase = model(UpperCAmelCase__ )['last_hidden_state'].detach()
self.assertEqual(output.shape , UpperCAmelCase__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCAmelCase__ , atol=1E-3 ) )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
lowerCAmelCase = XLMRobertaModel.from_pretrained('xlm-roberta-large' )
lowerCAmelCase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
lowerCAmelCase = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim
lowerCAmelCase = torch.tensor(
[[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
lowerCAmelCase = model(UpperCAmelCase__ )['last_hidden_state'].detach()
self.assertEqual(output.shape , UpperCAmelCase__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCAmelCase__ , atol=1E-3 ) )
| 4 | 1 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
__snake_case ="""
import os
"""
__snake_case ="""
def foo():
import os
return False
"""
__snake_case ="""
def foo():
def bar():
if True:
import os
return False
return bar()
"""
__snake_case ="""
import os
try:
import bar
except ImportError:
raise ValueError()
"""
__snake_case ="""
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
__snake_case ="""
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
__snake_case ="""
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
__snake_case ="""
import os
try:
import bar
except:
raise ValueError()
"""
__snake_case ="""
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
__snake_case ="""
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
__snake_case =[
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('case' , lowerCamelCase )
def a_ ( lowerCamelCase : List[str] , lowerCamelCase : Tuple ):
lowerCAmelCase = os.path.join(lowerCamelCase , 'test_file.py' )
with open(lowerCamelCase , 'w' ) as _tmp_file:
_tmp_file.write(lowerCamelCase )
lowerCAmelCase = get_imports(lowerCamelCase )
assert parsed_imports == ["os"]
| 4 |
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def a_ ( lowerCamelCase : Dict ):
lowerCAmelCase = {}
lowerCAmelCase = tokenizer(example['content'] , truncation=lowerCamelCase )['input_ids']
lowerCAmelCase = len(example['content'] ) / len(output['input_ids'] )
return output
__snake_case =HfArgumentParser(PretokenizationArguments)
__snake_case =parser.parse_args()
if args.num_workers is None:
__snake_case =multiprocessing.cpu_count()
__snake_case =AutoTokenizer.from_pretrained(args.tokenizer_dir)
__snake_case =time.time()
__snake_case =load_dataset(args.dataset_name, split="""train""")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
__snake_case =time.time()
__snake_case =ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
__snake_case =time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 4 | 1 |
'''simple docstring'''
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__snake_case ="""\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
__snake_case ="""\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
__snake_case ="""
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
        - 'matthews_correlation': Matthews correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def a_ ( lowerCamelCase : str , lowerCamelCase : Union[str, Any] ):
return float((preds == labels).mean() )
def a_ ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict , lowerCamelCase : str="binary" ):
lowerCAmelCase = simple_accuracy(lowerCamelCase , lowerCamelCase )
lowerCAmelCase = float(fa_score(y_true=lowerCamelCase , y_pred=lowerCamelCase , average=lowerCamelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : List[Any] ):
lowerCAmelCase = {}
for id_pred, label in zip(lowerCamelCase , lowerCamelCase ):
lowerCAmelCase = f'''{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}'''
lowerCAmelCase = id_pred['prediction']
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
lowerCAmelCase = [(pred, label)]
lowerCAmelCase , lowerCAmelCase = [], []
for question, preds_labels in question_map.items():
lowerCAmelCase , lowerCAmelCase = zip(*lowerCamelCase )
lowerCAmelCase = fa_score(y_true=lowerCamelCase , y_pred=lowerCamelCase , average='macro' )
fas.append(lowerCamelCase )
lowerCAmelCase = int(sum(pred == label for pred, label in preds_labels ) == len(lowerCamelCase ) )
ems.append(lowerCamelCase )
lowerCAmelCase = float(sum(lowerCamelCase ) / len(lowerCamelCase ) )
lowerCAmelCase = sum(lowerCamelCase ) / len(lowerCamelCase )
lowerCAmelCase = float(fa_score(y_true=lowerCamelCase , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] ) -> Any:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(UpperCAmelCase__ , UpperCAmelCase__ )}
elif self.config_name == "cb":
return acc_and_fa(UpperCAmelCase__ , UpperCAmelCase__ , fa_avg='macro' )
elif self.config_name == "record":
lowerCAmelCase = [
{
'qas': [
{'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
for ref in references
]
}
]
lowerCAmelCase = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
return evaluate_record(UpperCAmelCase__ , UpperCAmelCase__ )[0]
elif self.config_name == "multirc":
return evaluate_multirc(UpperCAmelCase__ , UpperCAmelCase__ )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(UpperCAmelCase__ , UpperCAmelCase__ )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
| 4 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case =logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : bool = field(default=__lowercase , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
lowerCamelCase : bool = field(
default=__lowercase , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
lowerCamelCase : Optional[int] = field(
default=__lowercase , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
lowerCamelCase : Optional[int] = field(
default=__lowercase , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
lowerCamelCase : Optional[Union[str, Path, GenerationConfig]] = field(
default=__lowercase , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
lowerCAmelCase = super().to_dict()
for k, v in d.items():
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase = v.to_dict()
return d
| 4 | 1 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
__snake_case =datasets.load_iris()
__snake_case =np.array(data["""data"""])
__snake_case =np.array(data["""target"""])
__snake_case =data["""target_names"""]
__snake_case , __snake_case , __snake_case , __snake_case =train_test_split(X, y)
def a_ ( lowerCamelCase : str , lowerCamelCase : Tuple ):
return np.linalg.norm(np.array(lowerCamelCase ) - np.array(lowerCamelCase ) )
def a_ ( lowerCamelCase : Optional[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[str]=5 ):
lowerCAmelCase = zip(lowerCamelCase , lowerCamelCase )
# List of distances of all points from the point to be classified
lowerCAmelCase = []
for data_point in data:
lowerCAmelCase = euclidean_distance(data_point[0] , lowerCamelCase )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
lowerCAmelCase = [i[1] for i in sorted(lowerCamelCase )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
lowerCAmelCase = Counter(lowerCamelCase ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
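# Illustrative sketch (clean names and data are mine): the classifier above is
# a plain k-nearest-neighbour vote — compute distances, sort, keep k, take the
# majority class.
import numpy as np
from collections import Counter

def _knn_predict(train_x, train_y, point, k=3):
    dists = sorted(
        (float(np.linalg.norm(np.array(x) - np.array(point))), y) for x, y in zip(train_x, train_y)
    )
    return Counter(label for _, label in dists[:k]).most_common(1)[0][0]

assert _knn_predict([[0, 0], [0, 1], [5, 5], [5, 6]], [0, 0, 1, 1], [0.2, 0.3]) == 0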
| 4 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__snake_case =logging.get_logger("""transformers.models.encodec""")
__snake_case ={
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
__snake_case ={
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
__snake_case ={
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
__snake_case ={
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
__snake_case ={
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
__snake_case ={
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__snake_case ={
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__snake_case =[]
__snake_case =[]
def a_ ( lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : List[str] ):
for attribute in key.split('.' ):
lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
if weight_type is not None:
lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase ).shape
else:
lowerCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowerCAmelCase = value
elif weight_type == "weight_g":
lowerCAmelCase = value
elif weight_type == "weight_v":
lowerCAmelCase = value
elif weight_type == "bias":
lowerCAmelCase = value
elif weight_type == "running_mean":
lowerCAmelCase = value
elif weight_type == "running_var":
lowerCAmelCase = value
elif weight_type == "num_batches_tracked":
lowerCAmelCase = value
elif weight_type == "weight_ih_l0":
lowerCAmelCase = value
elif weight_type == "weight_hh_l0":
lowerCAmelCase = value
elif weight_type == "bias_ih_l0":
lowerCAmelCase = value
elif weight_type == "bias_hh_l0":
lowerCAmelCase = value
elif weight_type == "weight_ih_l1":
lowerCAmelCase = value
elif weight_type == "weight_hh_l1":
lowerCAmelCase = value
elif weight_type == "bias_ih_l1":
lowerCAmelCase = value
elif weight_type == "bias_hh_l1":
lowerCAmelCase = value
else:
lowerCAmelCase = value
logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def a_ ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] ):
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
lowerCAmelCase , lowerCAmelCase = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
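# Illustrative sketch (the helper name is mine): the ignore patterns above
# support a trailing ".*" prefix match, a ".*." infix match, and a plain
# substring match.
def _matches(name: str, key: str) -> bool:
    if key.endswith(".*"):
        return name.startswith(key[:-1])
    if ".*." in key:
        prefix, suffix = key.split(".*.")
        return prefix in name and suffix in name
    return key in name

assert _matches("decoder.final.bias", "decoder.*")          # prefix
assert _matches("encoder.layers.0.conv", "encoder.*.conv")  # infix
assert _matches("quantizer.embed", "embed")                 # substring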
def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : Any , lowerCamelCase : str ):
lowerCAmelCase = []
if model_name == "encodec_24khz" or "encodec_32khz":
lowerCAmelCase = MAPPING_24K
elif model_name == "encodec_48khz":
lowerCAmelCase = MAPPING_48K
else:
raise ValueError(f'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(lowerCamelCase , lowerCamelCase ):
logger.info(f'''{name} was ignored''' )
continue
lowerCAmelCase = False
for key, mapped_key in MAPPING.items():
if "*" in key:
lowerCAmelCase , lowerCAmelCase = key.split('.*.' )
if prefix in name and suffix in name:
lowerCAmelCase = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
lowerCAmelCase = True
if "*" in mapped_key:
lowerCAmelCase = name.split(lowerCamelCase )[0].split('.' )[-2]
lowerCAmelCase = mapped_key.replace('*' , lowerCamelCase )
if "weight_g" in name:
lowerCAmelCase = 'weight_g'
elif "weight_v" in name:
lowerCAmelCase = 'weight_v'
elif "weight_ih_l0" in name:
lowerCAmelCase = 'weight_ih_l0'
elif "weight_hh_l0" in name:
lowerCAmelCase = 'weight_hh_l0'
elif "bias_ih_l0" in name:
lowerCAmelCase = 'bias_ih_l0'
elif "bias_hh_l0" in name:
lowerCAmelCase = 'bias_hh_l0'
elif "weight_ih_l1" in name:
lowerCAmelCase = 'weight_ih_l1'
elif "weight_hh_l1" in name:
lowerCAmelCase = 'weight_hh_l1'
elif "bias_ih_l1" in name:
lowerCAmelCase = 'bias_ih_l1'
elif "bias_hh_l1" in name:
lowerCAmelCase = 'bias_hh_l1'
elif "bias" in name:
lowerCAmelCase = 'bias'
elif "weight" in name:
lowerCAmelCase = 'weight'
elif "running_mean" in name:
lowerCAmelCase = 'running_mean'
elif "running_var" in name:
lowerCAmelCase = 'running_var'
elif "num_batches_tracked" in name:
lowerCAmelCase = 'num_batches_tracked'
else:
lowerCAmelCase = None
set_recursively(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
continue
if not is_used:
unused_weights.append(lowerCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def a_ ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : Dict=None , lowerCamelCase : Union[str, Any]=None , ):
if config_path is not None:
lowerCAmelCase = EncodecConfig.from_pretrained(lowerCamelCase )
else:
lowerCAmelCase = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
lowerCAmelCase = [8, 5, 4, 4]
lowerCAmelCase = [2.2]
lowerCAmelCase = 64
lowerCAmelCase = 32000
lowerCAmelCase = 2048
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
elif model_name == "encodec_48khz":
lowerCAmelCase = [8, 5, 4, 2]
lowerCAmelCase = [3.0, 6.0, 12.0, 24.0]
lowerCAmelCase = 48000
lowerCAmelCase = 2
lowerCAmelCase = False
lowerCAmelCase = 'time_group_norm'
lowerCAmelCase = True
lowerCAmelCase = 1.0
lowerCAmelCase = 0.01
else:
raise ValueError(f'''Unknown model name: {model_name}''' )
lowerCAmelCase = EncodecModel(lowerCamelCase )
lowerCAmelCase = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(lowerCamelCase )
lowerCAmelCase = torch.load(lowerCamelCase )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
lowerCAmelCase = original_checkpoint['best_state']
recursively_load_weights(lowerCamelCase , lowerCamelCase , lowerCamelCase )
model.save_pretrained(lowerCamelCase )
if repo_id:
print('Pushing to the hub...' )
feature_extractor.push_to_hub(lowerCamelCase )
model.push_to_hub(lowerCamelCase )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__snake_case =parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 4 | 1 |
'''simple docstring'''
def a_ ( lowerCamelCase : list[list[int]] , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : set ):
lowerCAmelCase , lowerCAmelCase = len(lowerCamelCase ), len(grid[0] )
if (
min(lowerCamelCase , lowerCamelCase ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
lowerCAmelCase = 0
count += depth_first_search(lowerCamelCase , row + 1 , lowerCamelCase , lowerCamelCase )
count += depth_first_search(lowerCamelCase , row - 1 , lowerCamelCase , lowerCamelCase )
count += depth_first_search(lowerCamelCase , lowerCamelCase , col + 1 , lowerCamelCase )
count += depth_first_search(lowerCamelCase , lowerCamelCase , col - 1 , lowerCamelCase )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
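# Illustrative usage sketch (a self-contained reimplementation with names of my
# own): count the simple paths from the top-left to the bottom-right cell of an
# obstacle grid by backtracking DFS, exactly the scheme used above.
def _count_paths(grid, row=0, col=0, visit=None):
    visit = set() if visit is None else visit
    rows, cols = len(grid), len(grid[0])
    if min(row, col) < 0 or row == rows or col == cols or (row, col) in visit or grid[row][col] == 1:
        return 0
    if (row, col) == (rows - 1, cols - 1):
        return 1
    visit.add((row, col))
    total = sum(_count_paths(grid, row + dr, col + dc, visit) for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)))
    visit.remove((row, col))
    return total

assert _count_paths([[0, 0], [0, 0]]) == 2  # right-down and down-right
assert _count_paths([[0, 1], [0, 0]]) == 1  # the obstacle blocks one route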
| 4 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__snake_case =logging.get_logger(__name__)
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : Dict , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : List[str] ) -> None:
warnings.warn(
'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use FlavaImageProcessor instead.' , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 4 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__snake_case =16
__snake_case =32
def a_ ( lowerCamelCase : Accelerator , lowerCamelCase : int = 16 ):
lowerCAmelCase = AutoTokenizer.from_pretrained('bert-base-cased' )
lowerCAmelCase = load_dataset('glue' , 'mrpc' )
def tokenize_function(lowerCamelCase : Any ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowerCamelCase , max_length=lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase = datasets.map(
lowerCamelCase , batched=lowerCamelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(lowerCamelCase : List[str] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase = 16
elif accelerator.mixed_precision != "no":
lowerCAmelCase = 8
else:
lowerCAmelCase = None
return tokenizer.pad(
lowerCamelCase , padding='longest' , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_tensors='pt' , )
# Instantiate dataloaders.
lowerCAmelCase = DataLoader(
tokenized_datasets['train'] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase )
lowerCAmelCase = DataLoader(
tokenized_datasets['validation'] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase )
return train_dataloader, eval_dataloader
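# Illustrative sketch (pure arithmetic, not the tokenizer call above): with
# pad_to_multiple_of=8, a batch whose longest sequence has 9 tokens is padded
# to 16, keeping tensor shapes friendly for mixed-precision kernels.
def _padded_length(longest: int, multiple: int) -> int:
    return -(-longest // multiple) * multiple  # ceiling division

assert _padded_length(9, 8) == 16 and _padded_length(16, 8) == 16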
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__snake_case =mocked_dataloaders # noqa: F811
def a_ ( lowerCamelCase : Any , lowerCamelCase : Tuple ):
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , lowerCamelCase ) == "1":
lowerCAmelCase = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
lowerCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
lowerCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase = config['lr']
lowerCAmelCase = int(config['num_epochs'] )
lowerCAmelCase = int(config['seed'] )
lowerCAmelCase = int(config['batch_size'] )
set_seed(lowerCamelCase )
lowerCAmelCase , lowerCAmelCase = get_dataloaders(lowerCamelCase , lowerCamelCase )
lowerCAmelCase = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
lowerCAmelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCAmelCase = batch_size // MAX_GPU_BATCH_SIZE
lowerCAmelCase = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase = AdamW(params=model.parameters() , lr=lowerCamelCase )
# Instantiate scheduler
lowerCAmelCase = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(lowerCamelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = accelerator.prepare(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
lowerCAmelCase = os.path.split(lowerCamelCase )[-1].split('.' )[0]
accelerator.init_trackers(lowerCamelCase , lowerCamelCase )
# Now we train the model
for epoch in range(lowerCamelCase ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )
    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
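# Usage sketch (not part of the original script; the filename below is a placeholder):
# launch through the accelerate CLI so the Accelerator picks up the distributed setup, e.g.
#   accelerate launch this_script.py --with_tracking --project_dir logs
# The flag names match the argparse definitions in main() above.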
| 4 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__snake_case =logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/blenderbot_small-90M""": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # Shift the history buffers and store the newest sample/result.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
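# Usage sketch (not from the original file): feed an impulse through a 2nd-order
# filter; the coefficients below are arbitrary illustrative values, not a designed filter.
if __name__ == "__main__":
    filt = IIRFilter(2)
    filt.set_coefficients([1.0, -0.5, 0.1], [0.2, 0.4, 0.2])  # assumed example values
    print([round(filt.process(x), 4) for x in (1.0, 0.0, 0.0, 0.0)])  # impulse response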
| 4 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
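# Usage sketch (illustrative, not from the original file; the override values are arbitrary):
# config = Speech2Text2Config(vocab_size=12000, decoder_layers=4)
# print(config.hidden_size)  # resolves to d_model through attribute_map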
| 4 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_rembert""": ["""REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RemBertConfig""", """RemBertOnnxConfig"""]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case =["""RemBertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case =["""RemBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"""REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RemBertForCausalLM""",
"""RemBertForMaskedLM""",
"""RemBertForMultipleChoice""",
"""RemBertForQuestionAnswering""",
"""RemBertForSequenceClassification""",
"""RemBertForTokenClassification""",
"""RemBertLayer""",
"""RemBertModel""",
"""RemBertPreTrainedModel""",
"""load_tf_weights_in_rembert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"""TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRemBertForCausalLM""",
"""TFRemBertForMaskedLM""",
"""TFRemBertForMultipleChoice""",
"""TFRemBertForQuestionAnswering""",
"""TFRemBertForSequenceClassification""",
"""TFRemBertForTokenClassification""",
"""TFRemBertLayer""",
"""TFRemBertModel""",
"""TFRemBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 4 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # use a heap-based merge to merge the stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
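# Worked example (added for illustration): for [5, 1, 4, 2, 3] the stacks evolve as
# [[5]] -> [[5, 1]] -> [[5, 1], [4]] -> [[5, 1], [4, 2]] -> [[5, 1], [4, 2], [3]],
# and merging the reversed stacks yields [1, 2, 3, 4, 5].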
if __name__ == "__main__":
__snake_case =input("""Enter numbers separated by a comma:\n""").strip()
__snake_case =[int(item) for item in user_input.split(""",""")]
print(patience_sort(unsorted))
| 4 | 1 |
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
__snake_case =logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
F'''Supported model types: {','.join(self.backbones_supported )}''' )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
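# Usage sketch (illustrative, not from the original file): compose a config from
# sub-configs; the SwinConfig()/DetrConfig() defaults are placeholders here.
# config = MaskFormerConfig.from_backbone_and_decoder_configs(SwinConfig(), DetrConfig())
# print(config.num_attention_heads, config.num_hidden_layers)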
| 4 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__snake_case ="""\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__snake_case ="""\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__snake_case ="""
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
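# Worked sketch (added for illustration, independent of the metric class above):
# BLEU's core building block is modified n-gram precision, which clips each
# candidate n-gram count by its maximum count in any single reference.
from collections import Counter


def unigram_precision(candidate: list, references: list) -> float:
    cand_counts = Counter(candidate)
    max_ref_counts = Counter()
    for ref in references:
        for token, count in Counter(ref).items():
            max_ref_counts[token] = max(max_ref_counts[token], count)
    clipped = sum(min(count, max_ref_counts[token]) for token, count in cand_counts.items())
    return clipped / max(1, len(candidate))


# e.g. unigram_precision(["the", "the", "cat"], [["the", "cat"]]) == 2 / 3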
| 4 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__snake_case =logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class HansDataset(Dataset):
    features: List[InputFeatures]
    def __init__(
        self,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        task: str,
        max_seq_length: Optional[int] = None,
        overwrite_cache=False,
        evaluate: bool = False,
    ):
        processor = hans_processors[task]()
        cached_features_file = os.path.join(
            data_dir,
            "cached_{}_{}_{}_{}".format(
                "dev" if evaluate else "train",
                tokenizer.__class__.__name__,
                str(max_seq_length),
                task,
            ),
        )
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                logger.info(f"Loading features from cached file {cached_features_file}")
                self.features = torch.load(cached_features_file)
            else:
                logger.info(f"Creating features from dataset file at {data_dir}")
                examples = (
                    processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                )
                logger.info("Training examples: %s", len(examples))
                self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(self.features, cached_features_file)
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
    features: List[InputFeatures]

    def __init__(
        self,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        task: str,
        max_seq_length: Optional[int] = 128,
        overwrite_cache=False,
        evaluate: bool = False,
    ):
        processor = hans_processors[task]()
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
        self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

        def gen():
            for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                if ex_index % 10000 == 0:
                    logger.info("Writing example %d of %d" % (ex_index, len(examples)))
                yield (
                    {
                        "example_id": 0,
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )

        self.dataset = tf.data.Dataset.from_generator(
            gen,
            (
                {
                    "example_id": tf.int32,
                    "input_ids": tf.int32,
                    "attention_mask": tf.int32,
                    "token_type_ids": tf.int32,
                },
                tf.int64,
            ),
            (
                {
                    "example_id": tf.TensorShape([]),
                    "input_ids": tf.TensorShape([None, None]),
                    "attention_mask": tf.TensorShape([None, None]),
                    "token_type_ids": tf.TensorShape([None, None]),
                },
                tf.TensorShape([]),
            ),
        )

    def get_dataset(self):
        return self.dataset

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
class HansProcessor(DataProcessor):
    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith("ex") else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))
        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))
    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")
    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
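# Usage sketch (illustrative, not from the original module; the data path is a placeholder):
# tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
# dataset = HansDataset("/path/to/hans", tokenizer, task="hans", max_seq_length=128)
# print(len(dataset), dataset.get_labels())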
| 4 |
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__snake_case ="""\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
__snake_case ="""\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
__snake_case ="""
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())
def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds, labels):
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 4 | 1 |
'''simple docstring'''
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()
def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
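# Usage sketch (illustrative, not from the original module): an Accelerator or
# PartialState must be initialized in the process before the adapter can log.
# from accelerate import Accelerator
# accelerator = Accelerator()
# logger = get_logger(__name__, log_level="INFO")
# logger.info("printed once", main_process_only=True)
# logger.debug("printed per process, in rank order", main_process_only=False, in_order=True)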
| 4 |
'''simple docstring'''
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
| 4 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=UpperCAmelCase__ , )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , ) -> Optional[int]:
lowerCAmelCase = True
lowerCAmelCase = FalconModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , )
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , ) -> Tuple:
lowerCAmelCase = FalconForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , ) -> Optional[int]:
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = FalconForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
# first forward pass
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , )
lowerCAmelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['hidden_states'][0]
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['hidden_states'][0]
# select random slice
lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
lowerCAmelCase , *lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
lowerCAmelCase = alibi
self.model_tester.create_and_check_model(UpperCAmelCase__ , *UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(UpperCAmelCase__ )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = FalconForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = 'single_label_classification'
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(UpperCAmelCase__ )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = FalconForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : Tuple ) -> int:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = FalconForCausalLM(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
lowerCAmelCase = input_ids.shape[0]
lowerCAmelCase = model._convert_to_rw_cache(result.past_key_values )
lowerCAmelCase = model._convert_cache_to_standard_format(UpperCAmelCase__ , UpperCAmelCase__ )
for layer in range(len(UpperCAmelCase__ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def __UpperCAmelCase ( self : int ) -> Dict:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = 'multi_label_classification'
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(UpperCAmelCase__ )
lowerCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase = FalconForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(UpperCAmelCase__ , 'use_cache' ):
return
lowerCAmelCase = model_class(UpperCAmelCase__ ).to(UpperCAmelCase__ )
if "use_cache" not in inputs:
lowerCAmelCase = True
lowerCAmelCase = model(**UpperCAmelCase__ )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
lowerCAmelCase = (
getattr(UpperCAmelCase__ , 'decoder_layers' , UpperCAmelCase__ )
or getattr(UpperCAmelCase__ , 'num_decoder_layers' , UpperCAmelCase__ )
or config.num_hidden_layers
)
lowerCAmelCase = getattr(UpperCAmelCase__ , 'num_kv_heads' , config.num_attention_heads )
lowerCAmelCase = getattr(UpperCAmelCase__ , 'd_model' , config.hidden_size )
lowerCAmelCase = embed_dim // num_attention_heads
lowerCAmelCase = outputs['past_key_values']
self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
lowerCAmelCase , lowerCAmelCase = inputs['input_ids'].shape
for i in range(UpperCAmelCase__ ):
if config.new_decoder_architecture:
lowerCAmelCase = config.num_attention_heads
elif config.multi_query:
lowerCAmelCase = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
lowerCAmelCase = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b' )
lowerCAmelCase = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b' )
model.eval()
model.to(UpperCAmelCase__ )
lowerCAmelCase = tokenizer('My favorite food is' , return_tensors='pt' ).to(UpperCAmelCase__ )
lowerCAmelCase = (
'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'
)
lowerCAmelCase = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=1_9 )
lowerCAmelCase = tokenizer.batch_decode(UpperCAmelCase__ )[0]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase = FalconForCausalLM.from_pretrained(UpperCAmelCase__ )
model.eval()
model.to(UpperCAmelCase__ )
lowerCAmelCase = tokenizer('My favorite food is' , return_tensors='pt' ).to(UpperCAmelCase__ )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=4 )
model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=4 )
model.generate(**UpperCAmelCase__ , num_beams=2 , max_new_tokens=4 )
@slow
def __UpperCAmelCase ( self : Dict ) -> Any:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase = FalconForCausalLM.from_pretrained(UpperCAmelCase__ )
model.eval()
model.to(device=UpperCAmelCase__ )
lowerCAmelCase = tokenizer('My favorite food is' , return_tensors='pt' ).to(UpperCAmelCase__ )
# Test results are the same with and without cache
lowerCAmelCase = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=2_0 , use_cache=UpperCAmelCase__ )
lowerCAmelCase = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=2_0 , use_cache=UpperCAmelCase__ )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 4 |
'''simple docstring'''
import os
__snake_case ={"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1_000}
def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
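# Round-trip sketch (hypothetical inputs, not taken from the puzzle data file):
# parse_roman_numerals("XIIII") == 14 and generate_roman_numerals(14) == "XIV",
# i.e. rewriting the non-minimal form saves two characters.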
def a_ ( lowerCamelCase : str = "/p089_roman.txt" ):
lowerCAmelCase = 0
with open(os.path.dirname(lowerCamelCase ) + roman_numerals_filename ) as filea:
lowerCAmelCase = filea.readlines()
for line in lines:
lowerCAmelCase = line.strip()
lowerCAmelCase = parse_roman_numerals(lowerCamelCase )
lowerCAmelCase = generate_roman_numerals(lowerCamelCase )
savings += len(lowerCamelCase ) - len(lowerCamelCase )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 4 | 1 |
'''simple docstring'''
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""",
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Dict = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
lowerCamelCase : Optional[Any] = '''nezha'''
def __init__( self : List[str] , UpperCAmelCase__ : str=2_1_1_2_8 , UpperCAmelCase__ : Optional[int]=7_6_8 , UpperCAmelCase__ : Dict=1_2 , UpperCAmelCase__ : Any=1_2 , UpperCAmelCase__ : Optional[Any]=3_0_7_2 , UpperCAmelCase__ : Optional[Any]="gelu" , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : int=5_1_2 , UpperCAmelCase__ : Optional[int]=6_4 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : List[Any]=1E-12 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : int=0 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : Union[str, Any]=True , **UpperCAmelCase__ : Dict , ) -> Optional[Any]:
super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = max_relative_position
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = classifier_dropout
lowerCAmelCase = use_cache
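        # Unlike BERT, NEZHA adds functional relative position encodings in
        # self-attention; max_relative_position above bounds the distances encoded.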
| 4 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
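# Example: floats_list((2, 3)) yields a 2x3 nested list of floats drawn
# uniformly from [0, scale), using the module-level global_rng by default.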
class UpperCAmelCase_ ( unittest.TestCase ):
def __init__( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str]=7 , UpperCAmelCase__ : int=4_0_0 , UpperCAmelCase__ : int=2_0_0_0 , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple=1_6_0_0_0 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Union[str, Any]=True , ) -> Any:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = min_seq_length
lowerCAmelCase = max_seq_length
lowerCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase = feature_size
lowerCAmelCase = padding_value
lowerCAmelCase = sampling_rate
lowerCAmelCase = return_attention_mask
lowerCAmelCase = do_normalize
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Union[str, Any]=False ) -> Optional[Any]:
def _flatten(UpperCAmelCase__ : int ):
return list(itertools.chain(*UpperCAmelCase__ ) )
if equal_length:
lowerCAmelCase = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCAmelCase = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase = [np.asarray(UpperCAmelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
lowerCamelCase : Dict = ASTFeatureExtractor
def __UpperCAmelCase ( self : str ) -> Optional[int]:
lowerCAmelCase = ASTFeatureExtractionTester(self )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
        # Tests that all calls wrap to encode_plus and batch_encode_plus
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase = [np.asarray(UpperCAmelCase__ ) for speech_input in speech_inputs]
# Test not batched input
lowerCAmelCase = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test batched
lowerCAmelCase = feat_extract(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
lowerCAmelCase = np.asarray(UpperCAmelCase__ )
lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
import torch
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(1_0_0 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('id' ).select(range(num_samples ) )[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
@require_torch
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
# fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[-0.9_894, -1.2_776, -0.9_066, -1.2_776, -0.9_349, -1.2_609, -1.0_386, -1.2_776,
-1.1_561, -1.2_776, -1.2_052, -1.2_723, -1.2_190, -1.2_132, -1.2_776, -1.1_133,
-1.1_953, -1.1_343, -1.1_584, -1.2_203, -1.1_770, -1.2_474, -1.2_381, -1.1_936,
-0.9_270, -0.8_317, -0.8_049, -0.7_706, -0.7_565, -0.7_869] )
# fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech , return_tensors='pt' ).input_values
        self.assertEqual(input_values.shape , (1, 1_0_2_4, 1_2_8) )
        self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , EXPECTED_INPUT_VALUES , atol=1E-4 ) )
| 4 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
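# Design note: at import time only the names in _import_structure are registered;
# _LazyModule swaps itself in for this module and defers the heavy torch/TF
# imports until one of those attributes is first accessed.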
| 4 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class UpperCAmelCase_ ( unittest.TestCase ):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(1_0 , 1_0 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
        AcceleratorState._reset_state()
| 4 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)
__snake_case ={
"""Intel/dpt-large""": """https://huggingface.co/Intel/dpt-large/resolve/main/config.json""",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : List[Any] = '''dpt'''
def __init__( self : int , UpperCAmelCase__ : List[Any]=7_6_8 , UpperCAmelCase__ : Optional[Any]=1_2 , UpperCAmelCase__ : str=1_2 , UpperCAmelCase__ : List[str]=3_0_7_2 , UpperCAmelCase__ : Dict="gelu" , UpperCAmelCase__ : Optional[Any]=0.0 , UpperCAmelCase__ : Dict=0.0 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : List[Any]=1E-12 , UpperCAmelCase__ : List[str]=3_8_4 , UpperCAmelCase__ : int=1_6 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : List[str]=False , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str=[2, 5, 8, 1_1] , UpperCAmelCase__ : Union[str, Any]="project" , UpperCAmelCase__ : List[Any]=[4, 2, 1, 0.5] , UpperCAmelCase__ : List[Any]=[9_6, 1_9_2, 3_8_4, 7_6_8] , UpperCAmelCase__ : Any=2_5_6 , UpperCAmelCase__ : Optional[Any]=-1 , UpperCAmelCase__ : List[str]=False , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Dict=0.4 , UpperCAmelCase__ : Any=2_5_5 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Dict=[1, 1_0_2_4, 2_4, 2_4] , UpperCAmelCase__ : Any=[0, 1] , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : str , ) -> int:
super().__init__(**UpperCAmelCase__ )
lowerCAmelCase = hidden_size
lowerCAmelCase = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('Initializing the config with a `BiT` backbone.' )
lowerCAmelCase = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
}
lowerCAmelCase = BitConfig(**UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
logger.info('Initializing the config with a `BiT` backbone.' )
lowerCAmelCase = BitConfig(**UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase = backbone_config
else:
raise ValueError(
F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
lowerCAmelCase = backbone_featmap_shape
lowerCAmelCase = neck_ignore_stages
if readout_type != "project":
raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' )
else:
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = []
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = qkv_bias
lowerCAmelCase = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' )
lowerCAmelCase = readout_type
lowerCAmelCase = reassemble_factors
lowerCAmelCase = neck_hidden_sizes
lowerCAmelCase = fusion_hidden_size
lowerCAmelCase = head_in_index
lowerCAmelCase = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
lowerCAmelCase = use_auxiliary_head
lowerCAmelCase = auxiliary_loss_weight
lowerCAmelCase = semantic_loss_ignore_index
lowerCAmelCase = semantic_classifier_dropout
def __UpperCAmelCase ( self : str ) -> Dict:
lowerCAmelCase = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase = self.backbone_config.to_dict()
lowerCAmelCase = self.__class__.model_type
return output
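# Minimal usage sketch (illustrative; assumes the public DPTConfig export):
#
#     from transformers import DPTConfig
#     config = DPTConfig(is_hybrid=True)   # builds a default BiT backbone config
#     assert config.readout_type == "project"  # the only readout allowed in hybrid mode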
| 4 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
__snake_case ={"""facebook/blenderbot-3B""": 128}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : List[Any] = VOCAB_FILES_NAMES
lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = ['''input_ids''', '''attention_mask''']
lowerCamelCase : List[Any] = BlenderbotTokenizer
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : str="replace" , UpperCAmelCase__ : Dict="<s>" , UpperCAmelCase__ : Tuple="</s>" , UpperCAmelCase__ : Optional[Any]="</s>" , UpperCAmelCase__ : Any="<s>" , UpperCAmelCase__ : List[str]="<unk>" , UpperCAmelCase__ : int="<pad>" , UpperCAmelCase__ : Union[str, Any]="<mask>" , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Union[str, Any]=True , **UpperCAmelCase__ : Optional[int] , ) -> int:
super().__init__(
UpperCAmelCase__ , UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , errors=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ , **UpperCAmelCase__ , )
lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , UpperCAmelCase__ ) != add_prefix_space:
lowerCAmelCase = getattr(UpperCAmelCase__ , pre_tok_state.pop('type' ) )
lowerCAmelCase = add_prefix_space
lowerCAmelCase = pre_tok_class(**UpperCAmelCase__ )
lowerCAmelCase = add_prefix_space
lowerCAmelCase = 'post_processor'
lowerCAmelCase = getattr(self.backend_tokenizer , UpperCAmelCase__ , UpperCAmelCase__ )
if tokenizer_component_instance:
lowerCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase = tuple(state['sep'] )
if "cls" in state:
lowerCAmelCase = tuple(state['cls'] )
lowerCAmelCase = False
if state.get('add_prefix_space' , UpperCAmelCase__ ) != add_prefix_space:
lowerCAmelCase = add_prefix_space
lowerCAmelCase = True
if state.get('trim_offsets' , UpperCAmelCase__ ) != trim_offsets:
lowerCAmelCase = trim_offsets
lowerCAmelCase = True
if changes_to_apply:
lowerCAmelCase = getattr(UpperCAmelCase__ , state.pop('type' ) )
lowerCAmelCase = component_class(**UpperCAmelCase__ )
setattr(self.backend_tokenizer , UpperCAmelCase__ , UpperCAmelCase__ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : Optional[Any] ) -> Tuple:
lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else value
lowerCAmelCase = value
def __UpperCAmelCase ( self : Optional[Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : List[str] ) -> BatchEncoding:
lowerCAmelCase = kwargs.get('is_split_into_words' , UpperCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[str] , *UpperCAmelCase__ : str , **UpperCAmelCase__ : List[str] ) -> BatchEncoding:
lowerCAmelCase = kwargs.get('is_split_into_words' , UpperCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
lowerCAmelCase = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
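    # Note: Blenderbot has no token type embeddings, so the token-type mask built
    # above is all zeros whether one or two sequences are passed.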
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> Any:
return token_ids_a + [self.eos_token_id]
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : "Conversation" ) -> List[int]:
lowerCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(UpperCAmelCase__ )
lowerCAmelCase = ' '.join(UpperCAmelCase__ )
lowerCAmelCase = self.encode(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > self.model_max_length:
lowerCAmelCase = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 4 | 1 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : bool , UpperCAmelCase__ : str = None , UpperCAmelCase__ : list = None ) -> Dict:
lowerCAmelCase = None
lowerCAmelCase = os.path.abspath(os.path.join('examples' , 'by_feature' ) )
lowerCAmelCase = os.path.abspath('examples' )
for item in os.listdir(UpperCAmelCase__ ):
if item not in EXCLUDE_EXAMPLES:
lowerCAmelCase = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
if os.path.isfile(UpperCAmelCase__ ) and ".py" in item_path:
with self.subTest(
tested_script=UpperCAmelCase__ , feature_script=UpperCAmelCase__ , tested_section='main()' if parser_only else 'training_function()' , ):
lowerCAmelCase = compare_against_test(
os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = '\n'.join(UpperCAmelCase__ )
if special_strings is not None:
for string in special_strings:
lowerCAmelCase = diff.replace(UpperCAmelCase__ , '' )
self.assertEqual(UpperCAmelCase__ , '' )
def __UpperCAmelCase ( self : Tuple ) -> str:
self.one_complete_example('complete_nlp_example.py' , UpperCAmelCase__ )
self.one_complete_example('complete_nlp_example.py' , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
lowerCAmelCase = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) )
lowerCAmelCase = [
' ' * 1_6 + '{\n\n',
' ' * 2_0 + '"accuracy": eval_metric["accuracy"],\n\n',
' ' * 2_0 + '"f1": eval_metric["f1"],\n\n',
' ' * 2_0 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
' ' * 2_0 + '"epoch": epoch,\n\n',
' ' * 1_6 + '},\n\n',
' ' * 1_6 + 'step=epoch,\n',
' ' * 1_2,
' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
]
self.one_complete_example('complete_cv_example.py' , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
self.one_complete_example('complete_cv_example.py' , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Union[str, Any] = False
@classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir , 'default_config.yml' )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir )
def __UpperCAmelCase ( self : Any ) -> List[str]:
lowerCAmelCase = F'''
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) )
def __UpperCAmelCase ( self : Dict ) -> List[Any]:
lowerCAmelCase = F'''
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
'''.split()
lowerCAmelCase = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
lowerCAmelCase = F'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}
'''.split()
lowerCAmelCase = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase__ )
self.assertNotIn('epoch 0:' , UpperCAmelCase__ )
self.assertIn('epoch 1:' , UpperCAmelCase__ )
def __UpperCAmelCase ( self : int ) -> Any:
lowerCAmelCase = F'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}
'''.split()
lowerCAmelCase = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase__ )
if torch.cuda.is_available():
lowerCAmelCase = torch.cuda.device_count()
else:
lowerCAmelCase = 1
if num_processes > 1:
self.assertNotIn('epoch 0:' , UpperCAmelCase__ )
self.assertIn('epoch 1:' , UpperCAmelCase__ )
else:
self.assertIn('epoch 0:' , UpperCAmelCase__ )
self.assertIn('epoch 1:' , UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : Dict ) -> Tuple:
lowerCAmelCase = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split()
with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ):
lowerCAmelCase = run_command(self._launch_args + testargs , return_stdout=UpperCAmelCase__ )
lowerCAmelCase = re.findall('({.+})' , UpperCAmelCase__ )
lowerCAmelCase = [r for r in results if 'accuracy' in r][-1]
lowerCAmelCase = ast.literal_eval(UpperCAmelCase__ )
self.assertGreaterEqual(results['accuracy'] , 0.75 )
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
lowerCAmelCase = ['examples/by_feature/multi_process_metrics.py']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
lowerCAmelCase = F'''
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase__ , 'tracking' ) ) )
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
lowerCAmelCase = ['examples/by_feature/gradient_accumulation.py']
run_command(self._launch_args + testargs )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
lowerCAmelCase = ['examples/by_feature/local_sgd.py']
run_command(self._launch_args + testargs )
| 4 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes remain uncompleted, every process whose arrival time has
    # passed and that still has remaining execution time is put into
    # ready_process, and the shortest one (target_process) is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
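# Note: once a process is selected it runs to completion (total_time jumps by its
# full burst), so this is the non-preemptive variant of shortest-job-first;
# remaining_time only records whether a process has finished yet.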
def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
F'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(F'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(F'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 4 | 1 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__snake_case =abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
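# Note: pytest discovers these hooks purely by name at collection time, which is
# why the functions must be called pytest_addoption / pytest_terminal_summary.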
| 4 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Tuple ) -> Any:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_euler' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self : List[str] ) -> Dict:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_euler' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_dpmpp_2m' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=1_5 , output_type='np' , use_karras_sigmas=UpperCAmelCase__ , )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array(
[0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
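        # The Karras schedule spaces the sigmas non-uniformly (denser at low
        # noise levels), hence the separate expected slice for this run.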
| 4 | 1 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : List[Any] = ''''''
lowerCamelCase : Any = '''hf-legacy''' # "hf://"" is reserved for hffs
    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self , **kwargs )
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    'name': hf_file.rfilename,
                    'size': None,
                    'type': 'file',
                }
                self.dir_cache.update(
                    {
                        str(d ): {'name': str(d ), 'size': None, 'type': 'directory'}
                        for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
                    } )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : str = "rb" , **UpperCAmelCase__ : Union[str, Any] , ) -> Tuple:
if not isinstance(self.repo_info , UpperCAmelCase__ ):
raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
lowerCAmelCase = hf_hub_url(self.repo_info.id , UpperCAmelCase__ , revision=self.repo_info.sha )
return fsspec.open(
UpperCAmelCase__ , mode=UpperCAmelCase__ , headers=get_authentication_headers_for_url(UpperCAmelCase__ , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open()
    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path )
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path )
    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip('/' ) )
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip('/' ) )
            root = p.parent
            if root == path:
                paths[str(p )] = f
        out = list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f['name'] for f in out )
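# Design note: _get_dirs() lazily builds a flat path -> metadata cache from the
# repo's sibling list; info() and ls() answer purely from that cache, so only
# _open() ever touches the network.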
| 4 |
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)
    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
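# Shape sketch (illustrative numbers): with num_heads=16, hidden_size=64 per head
# and num_splits=3 (fused query/key/value), a checkpoint-v2.0 weight of shape
# [16*3*64, D] is viewed as [16, 3, 64, D], transposed to [3, 16, 64, D], and
# flattened back -- i.e. reordered from head-major to split-major rows.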
def convert_megatron_checkpoint(args, input_state_dict, config):
# The converted output model.
    output_state_dict = {}
    # old versions did not store training args
    ds_args = input_state_dict.get('args' , None )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
lowerCAmelCase = input_state_dict['checkpoint_version']
else:
lowerCAmelCase = 0.0
    # The model.
    model = input_state_dict['model']
    # The language model.
    lm = model['language_model']
    # The embeddings.
    embeddings = lm['embedding']
    # The word embeddings.
    word_embeddings = embeddings['word_embeddings']['weight']
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict['transformer.wte.weight'] = word_embeddings
    # The position embeddings.
    pos_embeddings = embeddings['position_embeddings']['weight']
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0 )
    if n_positions != config.n_positions:
        raise ValueError(
            f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
    # Store the position embeddings.
    output_state_dict['transformer.wpe.weight'] = pos_embeddings
    # The transformer.
    transformer = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder']
# The regex to extract layer names.
    layer_re = re.compile(R'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' )
# The simple map of names for "automated" rules.
    megatron_to_transformers = {
        'attention.dense': '.attn.c_proj.',
        'self_attention.dense': '.attn.c_proj.',
        'mlp.dense_h_to_4h': '.mlp.c_fc.',
        'mlp.dense_4h_to_h': '.mlp.c_proj.',
    }
# Extract the layers.
for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key )
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1 ) )
        # The name of the operation.
        op_name = m.group(2 )
        # Is it a weight or a bias?
        weight_or_bias = m.group(3 )
        # The name of the layer.
        layer_name = f'''transformer.h.{layer_idx}'''
        # For layernorm(s), simply store the layer norm.
        if op_name.endswith('layernorm' ):
            ln_name = 'ln_1' if op_name.startswith('input' ) else 'ln_2'
            output_state_dict[layer_name + '.' + ln_name + '.' + weight_or_bias] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.float16 ) ).view(
                1 , 1 , n_positions , n_positions )
            output_state_dict[layer_name + '.attn.bias'] = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4 , dtype=torch.float16 )
            output_state_dict[layer_name + '.attn.masked_bias'] = masked_bias
            out_val = fix_query_key_value_ordering(val , checkpoint_version , 3 , heads , hidden_size_per_head )
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0 , 1 ).contiguous()
            # Store.
            output_state_dict[layer_name + '.attn.c_attn.weight'] = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val , checkpoint_version , 3 , heads , hidden_size_per_head )
            # Store. No change of shape.
            output_state_dict[layer_name + '.attn.c_attn.bias'] = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + 'weight'] = val.transpose(0 , 1 )
        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + 'bias'] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
    # The final layernorm.
    output_state_dict['transformer.ln_f.weight'] = transformer['final_layernorm.weight']
    output_state_dict['transformer.ln_f.bias'] = transformer['final_layernorm.bias']
    # For the LM head, transformers wants the matrix tied to the word embeddings.
    output_state_dict['lm_head.weight'] = word_embeddings
# It should be done!
return output_state_dict
def main():
# Create the argument parser.
    parser = argparse.ArgumentParser()
parser.add_argument('--print-checkpoint-structure' , action='store_true' )
parser.add_argument(
'path_to_checkpoint' , type=lowerCamelCase , help='Path to the checkpoint file (.zip archive or direct .pt file)' , )
parser.add_argument(
'--config_file' , default='' , type=lowerCamelCase , help='An optional config json file describing the pre-trained model.' , )
    args = parser.parse_args()
# Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith('.zip' ):
with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint:
with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict , map_location='cpu' )
    else:
        input_state_dict = torch.load(args.path_to_checkpoint , map_location='cpu' )
    ds_args = input_state_dict.get('args' , None )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
lowerCAmelCase = 'gelu_fast'
elif ds_args.openai_gelu:
lowerCAmelCase = 'gelu_new'
else:
lowerCAmelCase = 'gelu'
else:
# in the very early days this used to be "gelu_new"
lowerCAmelCase = 'gelu_new'
# Spell out all parameters in case the defaults change.
lowerCAmelCase = GPTaConfig(
vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=lowerCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='cls_index' , summary_use_proj=lowerCamelCase , summary_activation=lowerCamelCase , summary_proj_to_labels=lowerCamelCase , summary_first_dropout=0.1 , scale_attn_weights=lowerCamelCase , use_cache=lowerCamelCase , bos_token_id=50256 , eos_token_id=50256 , )
else:
lowerCAmelCase = GPTaConfig.from_json_file(args.config_file )
lowerCAmelCase = ['GPT2LMHeadModel']
# Convert.
print('Converting' )
    output_state_dict = convert_megatron_checkpoint(args , input_state_dict , config )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
        recursive_print(None , output_state_dict )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = 'gpt2'
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' )
    else:
        tokenizer_model_name = 'gpt2'
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name )
    tokenizer_class = type(tokenizer ).__name__
    config.tokenizer_class = tokenizer_class
    # Store the config to file.
    print('Saving config' )
    config.save_pretrained(basename )
# Save tokenizer based on args
print(f'''Adding {tokenizer_class} tokenizer files''' )
    tokenizer.save_pretrained(basename )
# Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename , 'pytorch_model.bin' )
    print(f'''Saving checkpoint to "{output_checkpoint_file}"''' )
    torch.save(output_state_dict , output_checkpoint_file )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 4 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase_ :
def __init__( self : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : Tuple=3_2 , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : List[str]=1_0 , UpperCAmelCase__ : Any=[8, 1_6, 3_2, 6_4] , UpperCAmelCase__ : Optional[int]=[1, 1, 2, 1] , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : List[str]="relu" , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Dict=["stage2", "stage3", "stage4"] , UpperCAmelCase__ : Optional[Any]=[2, 3, 4] , UpperCAmelCase__ : List[str]=1 , ) -> Union[str, Any]:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = image_size
lowerCAmelCase = num_channels
lowerCAmelCase = embeddings_size
lowerCAmelCase = hidden_sizes
lowerCAmelCase = depths
lowerCAmelCase = is_training
lowerCAmelCase = use_labels
lowerCAmelCase = hidden_act
lowerCAmelCase = num_labels
lowerCAmelCase = scope
lowerCAmelCase = len(UpperCAmelCase__ )
lowerCAmelCase = out_features
lowerCAmelCase = out_indices
lowerCAmelCase = num_groups
def __UpperCAmelCase ( self : str ) -> str:
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple ) -> str:
lowerCAmelCase = BitModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] ) -> Optional[int]:
lowerCAmelCase = self.num_labels
lowerCAmelCase = BitForImageClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str ) -> str:
lowerCAmelCase = BitBackbone(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCAmelCase = None
lowerCAmelCase = BitBackbone(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs
lowerCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
lowerCamelCase : List[str] = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
lowerCamelCase : Union[str, Any] = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : str = False
lowerCamelCase : List[Any] = False
lowerCamelCase : Dict = False
lowerCamelCase : List[str] = False
lowerCamelCase : List[Any] = False
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
lowerCAmelCase = BitModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
return
@unittest.skip(reason='Bit does not output attentions' )
def __UpperCAmelCase ( self : Any ) -> Tuple:
pass
@unittest.skip(reason='Bit does not use inputs_embeds' )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
pass
@unittest.skip(reason='Bit does not support input and output embeddings' )
def __UpperCAmelCase ( self : Any ) -> str:
pass
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(UpperCAmelCase__ )
lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[Any] ) -> str:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : Tuple ) -> Dict:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(config=UpperCAmelCase__ )
for name, module in model.named_modules():
if isinstance(UpperCAmelCase__ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def __UpperCAmelCase ( self : int ) -> Any:
def check_hidden_states_output(UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple ):
lowerCAmelCase = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
lowerCAmelCase = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase__ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = ['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCAmelCase = layer_type
lowerCAmelCase = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
@unittest.skip(reason='Bit does not use feedforward chunking' )
def __UpperCAmelCase ( self : Tuple ) -> Any:
pass
def __UpperCAmelCase ( self : str ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = BitModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def a_ ( ):
lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
lowerCAmelCase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(UpperCAmelCase__ )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=UpperCAmelCase__ , return_tensors='pt' ).to(UpperCAmelCase__ )
# forward pass
with torch.no_grad():
lowerCAmelCase = model(**UpperCAmelCase__ )
# verify the logits
lowerCAmelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
lowerCAmelCase = torch.tensor([[-0.6_526, -0.5_263, -1.4_398]] ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
@require_torch
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
lowerCamelCase : Optional[Any] = (BitBackbone,) if is_torch_available() else ()
lowerCamelCase : Optional[int] = BitConfig
lowerCamelCase : Optional[Any] = False
def __UpperCAmelCase ( self : Dict ) -> int:
lowerCAmelCase = BitModelTester(self )
| 4 |
'''simple docstring'''
from __future__ import annotations

from typing import Any


class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consists of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indices(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
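    # Background on the update above: `self` is taken to be A^(-1), and the method
    # applies the Sherman-Morrison identity
    #     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
    # with `numerator_factor` holding the scalar 1 + v^T A^(-1) u. The rank-1
    # update only exists when that scalar is nonzero, hence the `return None`.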
# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test2()
| 4 | 1 |
'''simple docstring'''
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write('\033[?25l')
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write('\033[?25h')
        sys.stdout.flush()


@contextmanager
def hide():
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
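# Minimal usage sketch (the wrapped call is a stand-in for any long-running work):
#
#     with hide():
#         run_long_task()  # cursor hidden while this runs, restored even on error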
| 4 |
'''simple docstring'''


class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
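# Usage sketch (values are illustrative only):
#
#     ps = PrefixSum([1, 2, 3])
#     ps.get_sum(0, 2)    # -> 6 (1 + 2 + 3)
#     ps.get_sum(1, 2)    # -> 5 (2 + 3)
#     ps.contains_sum(5)  # -> True: the subarray [2, 3] sums to 5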
| 4 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
lowerCamelCase : Any = DebertaTokenizer
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : List[Any] = DebertaTokenizerFast
def __UpperCAmelCase ( self : Dict ) -> Dict:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'[UNK]',
]
lowerCAmelCase = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
lowerCAmelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
lowerCAmelCase = {'unk_token': '[UNK]'}
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCAmelCase__ ) )
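        # Note on the fixture above: this is a tiny GPT-2-style byte-level BPE
        # vocabulary in which "\u0120" ("Ġ") marks a leading space, which is why
        # "lower newer" tokenizes to ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        # in the test below.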
def __UpperCAmelCase ( self : List[Any] , **UpperCAmelCase__ : Any ) -> Optional[Any]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : Tuple , UpperCAmelCase__ : Any ) -> Tuple:
lowerCAmelCase = 'lower newer'
lowerCAmelCase = 'lower newer'
return input_text, output_text
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = 'lower newer'
lowerCAmelCase = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = tokens + [tokenizer.unk_token]
lowerCAmelCase = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = tokenizer('Hello' , 'World' )
lowerCAmelCase = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['token_type_ids'] , UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
lowerCAmelCase = self.tokenizer_class.from_pretrained('microsoft/deberta-base' )
lowerCAmelCase = tokenizer.encode('sequence builders' , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer.encode('multi-sequence build' , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer.encode(
'sequence builders' , add_special_tokens=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ )
lowerCAmelCase = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
lowerCAmelCase = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
lowerCAmelCase = tokenizer_class.from_pretrained('microsoft/deberta-base' )
lowerCAmelCase = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
lowerCAmelCase = tokenizer(UpperCAmelCase__ , padding=UpperCAmelCase__ )
lowerCAmelCase = [tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ ) for seq in encoding['input_ids']]
# fmt: off
lowerCAmelCase = {
'input_ids': [
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 3_5, 8_3, 2_5_1_9_1, 1_6_3, 1_8_8_5_4, 1_3, 1_2_1_5_6, 1_2, 1_6_1_0_1, 2_5_3_7_6, 1_3_8_0_7, 9, 2_2_2_0_5, 2_7_8_9_3, 1_6_3_5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 2_4_5_3_6, 8_0, 4_3_7_9_7, 4_8_7_8, 7_3_7_3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_3_3, 7_8, 6_5, 1_6, 1_0, 3_7_2_4, 1_5_3_8, 3_3_1_8_3, 1_1_3_0_3, 4_3_7_9_7, 1_9_3_8, 4, 8_7_0, 2_4_1_6_5, 2_9_1_0_5, 5, 7_3_9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 3_6_1_7_3, 8_8, 8_0, 6_5_0, 7_8_2_1, 4_5_9_4_0, 6, 5_2, 2_5_5_9, 5, 1_8_3_6, 9, 5, 7_3_9_7, 1_3_1_7_1, 3_1, 5, 1_8_3_6, 9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
lowerCAmelCase = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
self.assertDictEqual(encoding.data , UpperCAmelCase__ )
for expected, decoded in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 4 |
'''simple docstring'''
def get_demo_graph(index: int) -> dict[int, list[int]]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at: int, parent: int, bridges: list[tuple[int, int]], id_: int) -> None:
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
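# Usage sketch (graph 0 above has exactly the bridges (2, 3), (3, 4) and (2, 5);
# the order of the returned list depends on the DFS traversal):
#
#     bridges = compute_bridges(get_demo_graph(0))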
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 1 |
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case ="""▁"""
__snake_case =get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
lowerCamelCase : int = BertGenerationTokenizer
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Dict = True
def __UpperCAmelCase ( self : int ) -> int:
super().setUp()
lowerCAmelCase = BertGenerationTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self : str ) -> List[Any]:
lowerCAmelCase = '<s>'
lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(UpperCAmelCase__ ) , 1_0_0_2 )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
lowerCAmelCase = BertGenerationTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ )
lowerCAmelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCAmelCase__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
lowerCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCAmelCase = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
lowerCAmelCase = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ )
self.assertListEqual(
UpperCAmelCase__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
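        # Note: in this test's fixture sentencepiece model the digit "9" and the
        # accented "é" are out of vocabulary, so both map to id 0 and decode back to
        # "<unk>" above, while the rest of the sentence round-trips unchanged.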
@cached_property
def __UpperCAmelCase ( self : Tuple ) -> int:
return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
@slow
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
lowerCAmelCase = 'Hello World!'
lowerCAmelCase = [1_8_5_3_6, 2_2_6_0, 1_0_1]
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@slow
def __UpperCAmelCase ( self : Dict ) -> int:
lowerCAmelCase = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
lowerCAmelCase = [
8_7_1,
4_1_9,
3_5_8,
9_4_6,
9_9_1,
2_5_2_1,
4_5_2,
3_5_8,
1_3_5_7,
3_8_7,
7_7_5_1,
3_5_3_6,
1_1_2,
9_8_5,
4_5_6,
1_2_6,
8_6_5,
9_3_8,
5_4_0_0,
5_7_3_4,
4_5_8,
1_3_6_8,
4_6_7,
7_8_6,
2_4_6_2,
5_2_4_6,
1_1_5_9,
6_3_3,
8_6_5,
4_5_1_9,
4_5_7,
5_8_2,
8_5_2,
2_5_5_7,
4_2_7,
9_1_6,
5_0_8,
4_0_5,
3_4_3_2_4,
4_9_7,
3_9_1,
4_0_8,
1_1_3_4_2,
1_2_4_4,
3_8_5,
1_0_0,
9_3_8,
9_8_5,
4_5_6,
5_7_4,
3_6_2,
1_2_5_9_7,
3_2_0_0,
3_1_2_9,
1_1_7_2,
]
self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) )
@require_torch
@slow
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
lowerCAmelCase = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
lowerCAmelCase = ' '.join(UpperCAmelCase__ )
lowerCAmelCase = self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors='pt' , return_token_type_ids=UpperCAmelCase__ )
lowerCAmelCase = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=UpperCAmelCase__ )
lowerCAmelCase = BertGenerationConfig()
lowerCAmelCase = BertGenerationEncoder(UpperCAmelCase__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**UpperCAmelCase__ )
model(**UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : Dict ) -> List[str]:
# fmt: off
lowerCAmelCase = {'input_ids': [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase__ , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
| 4 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case =logging.get_logger(__name__)
def a_ ( lowerCamelCase : Any ):
lowerCAmelCase = OrderedDict()
for key, value in state_dict.items():
if key.startswith('module.encoder' ):
lowerCAmelCase = key.replace('module.encoder' , 'glpn.encoder' )
if key.startswith('module.decoder' ):
lowerCAmelCase = key.replace('module.decoder' , 'decoder.stages' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase = key[key.find('patch_embed' ) + len('patch_embed' )]
lowerCAmelCase = key.replace(f'''patch_embed{idx}''' , f'''patch_embeddings.{int(lowerCamelCase )-1}''' )
if "norm" in key:
lowerCAmelCase = key.replace('norm' , 'layer_norm' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
lowerCAmelCase = key.replace(f'''layer_norm{idx}''' , f'''layer_norm.{int(lowerCamelCase )-1}''' )
if "layer_norm1" in key:
lowerCAmelCase = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
lowerCAmelCase = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase = key[key.find('block' ) + len('block' )]
lowerCAmelCase = key.replace(f'''block{idx}''' , f'''block.{int(lowerCamelCase )-1}''' )
if "attn.q" in key:
lowerCAmelCase = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
lowerCAmelCase = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
lowerCAmelCase = key.replace('attn' , 'attention.self' )
if "fc1" in key:
lowerCAmelCase = key.replace('fc1' , 'dense1' )
if "fc2" in key:
lowerCAmelCase = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
lowerCAmelCase = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
lowerCAmelCase = key.replace('linear_fuse.conv' , 'linear_fuse' )
lowerCAmelCase = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase = key[key.find('linear_c' ) + len('linear_c' )]
lowerCAmelCase = key.replace(f'''linear_c{idx}''' , f'''linear_c.{int(lowerCamelCase )-1}''' )
if "bot_conv" in key:
lowerCAmelCase = key.replace('bot_conv' , '0.convolution' )
if "skip_conv1" in key:
lowerCAmelCase = key.replace('skip_conv1' , '1.convolution' )
if "skip_conv2" in key:
lowerCAmelCase = key.replace('skip_conv2' , '2.convolution' )
if "fusion1" in key:
lowerCAmelCase = key.replace('fusion1' , '1.fusion' )
if "fusion2" in key:
lowerCAmelCase = key.replace('fusion2' , '2.fusion' )
if "fusion3" in key:
lowerCAmelCase = key.replace('fusion3' , '3.fusion' )
if "fusion" in key and "conv" in key:
lowerCAmelCase = key.replace('conv' , 'convolutional_layer' )
if key.startswith('module.last_layer_depth' ):
lowerCAmelCase = key.replace('module.last_layer_depth' , 'head.head' )
lowerCAmelCase = value
return new_state_dict
def a_ ( lowerCamelCase : List[str] , lowerCamelCase : str ):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
lowerCAmelCase = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase = kv_bias[config.hidden_sizes[i] :]
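# Note on the split above: the original checkpoint fuses keys and values into one
# projection, so kv_weight has shape (2 * hidden_size, hidden_size); rows
# [:hidden_size] become the key weights and rows [hidden_size:] the value weights
# (kv_bias is split the same way). A standalone sketch with an illustrative size:
#
#     kv_weight = torch.randn(2 * 64, 64)
#     k_w, v_w = kv_weight[:64, :], kv_weight[64:, :]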
def a_ ( ):
lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return image
@torch.no_grad()
def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any]=False , lowerCamelCase : List[str]=None ):
lowerCAmelCase = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
lowerCAmelCase = GLPNImageProcessor()
# prepare image
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=lowerCamelCase , return_tensors='pt' ).pixel_values
logger.info('Converting model...' )
# load original state dict
lowerCAmelCase = torch.load(lowerCamelCase , map_location=torch.device('cpu' ) )
# rename keys
lowerCAmelCase = rename_keys(lowerCamelCase )
# key and value matrices need special treatment
read_in_k_v(lowerCamelCase , lowerCamelCase )
# create HuggingFace model and load state dict
lowerCAmelCase = GLPNForDepthEstimation(lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
# forward pass
lowerCAmelCase = model(lowerCamelCase )
lowerCAmelCase = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
lowerCAmelCase = torch.tensor(
[[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
elif "kitti" in model_name:
lowerCAmelCase = torch.tensor(
[[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
else:
raise ValueError(f'''Unknown model name: {model_name}''' )
lowerCAmelCase = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , lowerCamelCase , atol=1e-4 )
print('Looks ok!' )
# finally, push to hub if required
if push_to_hub:
logger.info('Pushing model and image processor to the hub...' )
model.push_to_hub(
repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCamelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase , )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
__snake_case =parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 4 | 1 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any]=0.01 , UpperCAmelCase__ : List[Any]=1_0_0_0 ) -> Optional[Any]:
lowerCAmelCase = p_stop
lowerCAmelCase = max_length
def __iter__( self : Optional[int] ) -> List[str]:
lowerCAmelCase = 0
lowerCAmelCase = False
while not stop and count < self.max_length:
yield count
count += 1
lowerCAmelCase = random.random() < self.p_stop
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int=False , UpperCAmelCase__ : List[str]=True ) -> List[Any]:
lowerCAmelCase = [
BatchSamplerShard(UpperCAmelCase__ , 2 , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__ )
for i in range(2 )
]
lowerCAmelCase = [list(UpperCAmelCase__ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(UpperCAmelCase__ ) for shard in batch_sampler_shards] , [len(UpperCAmelCase__ ) for e in expected] )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[str] ) -> int:
# Check the shards when the dataset is a round multiple of total batch size.
lowerCAmelCase = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=UpperCAmelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowerCAmelCase = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [0, 1, 2]],
]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ )
        # Check the shards when the dataset is not a round multiple of batch size but its number of
        # batches is a multiple of num_processes.
lowerCAmelCase = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 0, 1]],
]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ )
        # Check the shards when the dataset is not a round multiple of batch size and its number of
        # batches is not a multiple of num_processes.
lowerCAmelCase = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [1, 2, 3]],
]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ )
# Check the shards when the dataset is very small.
lowerCAmelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [[], []]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ )
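        # Note on the expectations above: with the default even_batches=True, a shard
        # that would come up short wraps around to the start of the dataset (hence
        # [2_1, 0, 1] and [[0, 1, 0]]), so both processes always receive the same
        # number of complete batches; with drop_last=True the trailing incomplete
        # batch is dropped instead and no wrap-around is needed.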
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
# Check the shards when the dataset is a round multiple of batch size.
lowerCAmelCase = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ )
lowerCAmelCase = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=UpperCAmelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size.
lowerCAmelCase = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [0, 1]],
]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ )
lowerCAmelCase = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowerCAmelCase = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [1, 2]],
]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ )
lowerCAmelCase = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ )
# Check the shards when the dataset is very small.
lowerCAmelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ )
lowerCAmelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [[], []]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ )
def __UpperCAmelCase ( self : str ) -> int:
# Check the shards when the dataset is a round multiple of total batch size.
lowerCAmelCase = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__ )
lowerCAmelCase = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=UpperCAmelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowerCAmelCase = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__ )
lowerCAmelCase = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__ )
        # Check the shards when the dataset is not a round multiple of batch size but its number of
        # batches is a multiple of num_processes.
lowerCAmelCase = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1]],
]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__ )
lowerCAmelCase = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__ )
        # Check the shards when the dataset is not a round multiple of batch size and its number of
        # batches is not a multiple of num_processes.
lowerCAmelCase = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__ )
lowerCAmelCase = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__ )
# Check the shards when the dataset is very small.
lowerCAmelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [[[0, 1]], []]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__ )
lowerCAmelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [[], []]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , even_batches=UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
# Check the shards when the dataset is a round multiple of batch size.
lowerCAmelCase = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__ )
lowerCAmelCase = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=UpperCAmelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size.
lowerCAmelCase = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__ )
lowerCAmelCase = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowerCAmelCase = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__ )
lowerCAmelCase = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__ )
# Check the shards when the dataset is very small.
lowerCAmelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [[[0, 1]], []]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__ )
lowerCAmelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = [[], []]
self.check_batch_sampler_shards(UpperCAmelCase__ , UpperCAmelCase__ , split_batches=UpperCAmelCase__ , even_batches=UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] ) -> str:
lowerCAmelCase = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 1_0, 1_1], [1_2, 1_3]]
lowerCAmelCase = [BatchSamplerShard(UpperCAmelCase__ , 2 , UpperCAmelCase__ , even_batches=UpperCAmelCase__ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [1_2, 1_3]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 1_0, 1_1]] )
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : int=False ) -> Optional[int]:
random.seed(UpperCAmelCase__ )
lowerCAmelCase = list(UpperCAmelCase__ )
lowerCAmelCase = [
IterableDatasetShard(
UpperCAmelCase__ , batch_size=UpperCAmelCase__ , drop_last=UpperCAmelCase__ , num_processes=UpperCAmelCase__ , process_index=UpperCAmelCase__ , split_batches=UpperCAmelCase__ , )
for i in range(UpperCAmelCase__ )
]
lowerCAmelCase = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(UpperCAmelCase__ )
iterable_dataset_lists.append(list(UpperCAmelCase__ ) )
lowerCAmelCase = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
lowerCAmelCase = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
self.assertTrue(len(UpperCAmelCase__ ) % shard_batch_size == 0 )
lowerCAmelCase = []
for idx in range(0 , len(UpperCAmelCase__ ) , UpperCAmelCase__ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(UpperCAmelCase__ ) < len(UpperCAmelCase__ ):
reference += reference
self.assertListEqual(UpperCAmelCase__ , reference[: len(UpperCAmelCase__ )] )
def __UpperCAmelCase ( self : List[str] ) -> int:
lowerCAmelCase = 4_2
lowerCAmelCase = RandomIterableDataset()
self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__ )
self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__ )
self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__ )
self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__ )
# Edge case with a very small dataset
lowerCAmelCase = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__ )
self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__ )
self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__ )
self.check_iterable_dataset_shards(UpperCAmelCase__ , UpperCAmelCase__ , batch_size=4 , drop_last=UpperCAmelCase__ , split_batches=UpperCAmelCase__ )
def __UpperCAmelCase ( self : Tuple ) -> Dict:
lowerCAmelCase = BatchSampler(range(1_6 ) , batch_size=4 , drop_last=UpperCAmelCase__ )
lowerCAmelCase = SkipBatchSampler(UpperCAmelCase__ , 2 )
self.assertListEqual(list(UpperCAmelCase__ ) , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
lowerCAmelCase = SkipDataLoader(list(range(1_6 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
lowerCAmelCase = DataLoader(list(range(1_6 ) ) , batch_size=4 )
lowerCAmelCase = skip_first_batches(UpperCAmelCase__ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
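        # Note: SkipBatchSampler, SkipDataLoader and skip_first_batches (tested above)
        # all implement the same mid-epoch resume: over range(16) with batch_size=4,
        # skipping 2 batches leaves exactly [8..11] and [12..15], which is what makes
        # resuming a dataloader from a checkpoint deterministic.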
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
lowerCAmelCase = DataLoaderShard(list(range(1_6 ) ) , batch_size=4 )
for idx, _ in enumerate(UpperCAmelCase__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(UpperCAmelCase__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
Accelerator()
lowerCAmelCase = DataLoaderDispatcher(range(1_6 ) , batch_size=4 )
for idx, _ in enumerate(UpperCAmelCase__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(UpperCAmelCase__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 4 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self : str ) -> List[str]:
lowerCAmelCase = XLMRobertaModel.from_pretrained('xlm-roberta-base' )
lowerCAmelCase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
lowerCAmelCase = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim
lowerCAmelCase = torch.tensor(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
lowerCAmelCase = model(UpperCAmelCase__ )['last_hidden_state'].detach()
self.assertEqual(output.shape , UpperCAmelCase__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCAmelCase__ , atol=1E-3 ) )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
lowerCAmelCase = XLMRobertaModel.from_pretrained('xlm-roberta-large' )
lowerCAmelCase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
lowerCAmelCase = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim
lowerCAmelCase = torch.tensor(
[[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
lowerCAmelCase = model(UpperCAmelCase__ )['last_hidden_state'].detach()
self.assertEqual(output.shape , UpperCAmelCase__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCAmelCase__ , atol=1E-3 ) )
| 4 | 1 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
__snake_case =sys.version_info >= (3, 10)
def a_ ( lowerCamelCase : List[Any]=None , lowerCamelCase : Tuple=None ):
return field(default_factory=lambda: default , metadata=lowerCamelCase )
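# `list_field` above is the standard dataclasses idiom for mutable defaults: a list
# default must come from `default_factory` rather than `default`. A standalone
# sketch of the same pattern (names are illustrative):
#
#     from dataclasses import dataclass, field
#
#     @dataclass
#     class Args:
#         names: list[str] = field(default_factory=lambda: ["a", "b"])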
@dataclass
class UpperCAmelCase_ :
lowerCamelCase : int
lowerCamelCase : float
lowerCamelCase : str
lowerCamelCase : bool
@dataclass
class UpperCAmelCase_ :
lowerCamelCase : int = 42
lowerCamelCase : str = field(default='''toto''' , metadata={'''help''': '''help message'''} )
@dataclass
class UpperCAmelCase_ :
lowerCamelCase : bool = False
lowerCamelCase : bool = True
lowerCamelCase : Optional[bool] = None
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : List[Any] = '''titi'''
lowerCamelCase : List[str] = '''toto'''
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : List[str] = '''titi'''
lowerCamelCase : Any = '''toto'''
lowerCamelCase : Union[str, Any] = 42
@dataclass
class UpperCAmelCase_ :
lowerCamelCase : BasicEnum = "toto"
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
lowerCAmelCase = BasicEnum(self.foo )
@dataclass
class UpperCAmelCase_ :
lowerCamelCase : MixedTypeEnum = "toto"
def __UpperCAmelCase ( self : int ) -> Dict:
lowerCAmelCase = MixedTypeEnum(self.foo )
@dataclass
class UpperCAmelCase_ :
lowerCamelCase : Optional[int] = None
lowerCamelCase : Optional[float] = field(default=__lowercase , metadata={'''help''': '''help message'''} )
lowerCamelCase : Optional[str] = None
lowerCamelCase : Optional[List[str]] = list_field(default=[] )
lowerCamelCase : Optional[List[int]] = list_field(default=[] )
@dataclass
class UpperCAmelCase_ :
lowerCamelCase : List[int] = list_field(default=[] )
lowerCamelCase : List[int] = list_field(default=[1, 2, 3] )
lowerCamelCase : List[str] = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
lowerCamelCase : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class UpperCAmelCase_ :
lowerCamelCase : List[int] = field()
lowerCamelCase : str = field()
lowerCamelCase : BasicEnum = field()
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
lowerCAmelCase = BasicEnum(self.required_enum )
@dataclass
class UpperCAmelCase_ :
lowerCamelCase : int
lowerCamelCase : "BasicEnum" = field()
lowerCamelCase : "Optional[bool]" = None
lowerCamelCase : "str" = field(default='''toto''' , metadata={'''help''': '''help message'''} )
lowerCamelCase : "List[str]" = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
if is_python_no_less_than_3_10:
@dataclass
class UpperCAmelCase_ :
lowerCamelCase : bool = False
lowerCamelCase : bool = True
lowerCamelCase : bool | None = None
@dataclass
class UpperCAmelCase_ :
lowerCamelCase : int | None = None
lowerCamelCase : float | None = field(default=__lowercase , metadata={'''help''': '''help message'''} )
lowerCamelCase : str | None = None
lowerCamelCase : list[str] | None = list_field(default=[] )
lowerCamelCase : list[int] | None = list_field(default=[] )
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : argparse.ArgumentParser , UpperCAmelCase__ : argparse.ArgumentParser ) -> Dict:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
lowerCAmelCase = {k: v for k, v in vars(UpperCAmelCase__ ).items() if k != 'container'}
lowerCAmelCase = {k: v for k, v in vars(UpperCAmelCase__ ).items() if k != 'container'}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , UpperCAmelCase__ ) and yy.get('choices' , UpperCAmelCase__ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](UpperCAmelCase__ ) , yy['type'](UpperCAmelCase__ ) )
del xx["type"], yy["type"]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument('--foo' , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
expected.add_argument('--bar' , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
expected.add_argument('--baz' , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
expected.add_argument('--flag' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , const=UpperCAmelCase__ , nargs='?' )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
((lowerCAmelCase) , ) = parser.parse_args_into_dataclasses(UpperCAmelCase__ , look_for_args_file=UpperCAmelCase__ )
self.assertFalse(example.flag )
def __UpperCAmelCase ( self : int ) -> int:
lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument('--foo' , default=4_2 , type=UpperCAmelCase__ )
expected.add_argument('--baz' , default='toto' , type=UpperCAmelCase__ , help='help message' )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Any ) -> str:
lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument('--foo' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , const=UpperCAmelCase__ , nargs='?' )
expected.add_argument('--baz' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , const=UpperCAmelCase__ , nargs='?' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=UpperCAmelCase__ , dest='baz' )
expected.add_argument('--opt' , type=UpperCAmelCase__ , default=UpperCAmelCase__ )
lowerCAmelCase = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(UpperCAmelCase__ )
for dataclass_type in dataclass_types:
lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = parser.parse_args([] )
self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
lowerCAmelCase = parser.parse_args(['--foo', '--no_baz'] )
self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
lowerCAmelCase = parser.parse_args(['--foo', '--baz'] )
self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
lowerCAmelCase = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] )
self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
lowerCAmelCase = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] )
self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , baz=UpperCAmelCase__ , opt=UpperCAmelCase__ ) )
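    # Note on the '--foo True' / '--foo False' round-trip above: string_to_bool (imported at the
    # top) is the argparse `type` installed for bool fields; per the upstream helper it maps
    # yes/true/t/y/1 to True and no/false/f/n/0 to False case-insensitively and rejects the rest.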
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 4_2] , type=make_choice_type_function(['titi', 'toto', 4_2] ) , )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
lowerCAmelCase = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
lowerCAmelCase = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
lowerCAmelCase = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
lowerCAmelCase = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 4_2 )
lowerCAmelCase = parser.parse_args_into_dataclasses(['--foo', '42'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __UpperCAmelCase ( self : int ) -> Dict:
@dataclass
class UpperCAmelCase_ :
lowerCamelCase : Literal["titi", "toto", 42] = "toto"
lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 4_2) , type=make_choice_type_function(['titi', 'toto', 4_2] ) , )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
lowerCAmelCase = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
lowerCAmelCase = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 4_2 )
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=UpperCAmelCase__ )
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=UpperCAmelCase__ )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=UpperCAmelCase__ )
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase__ )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = parser.parse_args([] )
self.assertEqual(
UpperCAmelCase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )
lowerCAmelCase = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
self.assertEqual(UpperCAmelCase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument('--foo' , default=UpperCAmelCase__ , type=UpperCAmelCase__ )
expected.add_argument('--bar' , default=UpperCAmelCase__ , type=UpperCAmelCase__ , help='help message' )
expected.add_argument('--baz' , default=UpperCAmelCase__ , type=UpperCAmelCase__ )
expected.add_argument('--ces' , nargs='+' , default=[] , type=UpperCAmelCase__ )
expected.add_argument('--des' , nargs='+' , default=[] , type=UpperCAmelCase__ )
lowerCAmelCase = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(UpperCAmelCase__ )
for dataclass_type in dataclass_types:
lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = parser.parse_args([] )
self.assertEqual(UpperCAmelCase__ , Namespace(foo=UpperCAmelCase__ , bar=UpperCAmelCase__ , baz=UpperCAmelCase__ , ces=[] , des=[] ) )
lowerCAmelCase = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
self.assertEqual(UpperCAmelCase__ , Namespace(foo=1_2 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )
def __UpperCAmelCase ( self : Any ) -> List[str]:
lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
expected.add_argument('--required_str' , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=UpperCAmelCase__ , )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
lowerCAmelCase = argparse.ArgumentParser()
expected.add_argument('--foo' , type=UpperCAmelCase__ , required=UpperCAmelCase__ )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=UpperCAmelCase__ , )
expected.add_argument('--opt' , type=UpperCAmelCase__ , default=UpperCAmelCase__ )
expected.add_argument('--baz' , default='toto' , type=UpperCAmelCase__ , help='help message' )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=UpperCAmelCase__ )
self.argparsersEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
lowerCAmelCase = {
'foo': 1_2,
'bar': 3.14,
'baz': '42',
'flag': True,
}
lowerCAmelCase = parser.parse_dict(UpperCAmelCase__ )[0]
lowerCAmelCase = BasicExample(**UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
lowerCAmelCase = {
'foo': 1_2,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 4_2,
}
self.assertRaises(UpperCAmelCase__ , parser.parse_dict , UpperCAmelCase__ , allow_extra_keys=UpperCAmelCase__ )
def __UpperCAmelCase ( self : int ) -> List[str]:
lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
lowerCAmelCase = {
'foo': 1_2,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase = os.path.join(UpperCAmelCase__ , 'temp_json' )
os.mkdir(UpperCAmelCase__ )
with open(temp_local_path + '.json' , 'w+' ) as f:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
            lowerCAmelCase = parser.parse_json_file(Path(temp_local_path + '.json' ) )[0]
lowerCAmelCase = BasicExample(**UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
lowerCAmelCase = {
'foo': 1_2,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase = os.path.join(UpperCAmelCase__ , 'temp_yaml' )
os.mkdir(UpperCAmelCase__ )
with open(temp_local_path + '.yaml' , 'w+' ) as f:
yaml.dump(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]
lowerCAmelCase = BasicExample(**UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Any ) -> int:
lowerCAmelCase = HfArgumentParser(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
| 4 |
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example ):
    output = {}
    output['input_ids'] = tokenizer(example['content'] , truncation=False )['input_ids']
    output['ratio_char_token'] = len(example['content'] ) / len(output['input_ids'] )
    return output
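# Quick sanity check of the ratio above (hypothetical numbers): a 1200-character source file
# that encodes to 300 tokens gives ratio_char_token == 4.0; unusually low ratios are a cheap
# signal for binary-like or otherwise hard-to-tokenize content.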
parser =HfArgumentParser(PretokenizationArguments)
args =parser.parse_args()
if args.num_workers is None:
    args.num_workers =multiprocessing.cpu_count()
tokenizer =AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start =time.time()
ds =load_dataset(args.dataset_name, split="""train""")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
t_start =time.time()
ds =ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start =time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 4 | 1 |
'''simple docstring'''
def one_pence() -> int:
    return 1
def two_pence(x: int ) -> int:
    return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def five_pence(x: int ) -> int:
    return 0 if x < 0 else five_pence(x - 5 ) + two_pence(x )
def ten_pence(x: int ) -> int:
    return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(x )
def twenty_pence(x: int ) -> int:
    return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(x )
def fifty_pence(x: int ) -> int:
    return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(x )
def one_pound(x: int ) -> int:
    return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(x )
def two_pound(x: int ) -> int:
    return 0 if x < 0 else two_pound(x - 200 ) + one_pound(x )
def solution(x: int = 200 ) -> int:
    return two_pound(x )
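# Worked example: solution(5) == 4, counting {5x1p}, {1p + 2x2p}, {3x1p + 2p} and {5p};
# for the full Project Euler 31 problem, solution(200) evaluates to 73682.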
if __name__ == "__main__":
print(solution(int(input().strip())))
| 4 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case =logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : bool = field(default=__lowercase , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
lowerCamelCase : bool = field(
default=__lowercase , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
lowerCamelCase : Optional[int] = field(
default=__lowercase , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
lowerCamelCase : Optional[int] = field(
default=__lowercase , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
lowerCamelCase : Optional[Union[str, Path, GenerationConfig]] = field(
default=__lowercase , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
lowerCAmelCase = super().to_dict()
for k, v in d.items():
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase = v.to_dict()
return d
| 4 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger =logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig ):
    model_type = '''upernet'''
    def __init__(self , backbone_config=None , hidden_size=5_1_2 , initializer_range=0.02 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=3_8_4 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=2_5_5 , **kwargs , ):
        super().__init__(**kwargs )
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
        elif isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.get('model_type' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict(self ):
        output = copy.deepcopy(self.__dict__ )
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
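# Minimal usage sketch (illustrative only):
#   config = UperNetConfig()     # falls back to the default ResNet backbone
#   d = config.to_dict()         # 'backbone_config' is serialized as a plain nested dict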
| 4 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger =logging.get_logger("""transformers.models.encodec""")
MAPPING_QUANTIZER ={
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER ={
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K ={
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER ={
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K ={
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K ={
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K ={
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS =[]
IGNORE_KEYS =[]
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def should_ignore(name , ignore_keys ):
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
            prefix , suffix = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
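# Example of the matching above: an ignore pattern 'encoder.*' skips every name that starts
# with 'encoder.', a pattern 'quantizer.*.codebook' skips any name containing both 'quantizer'
# and 'codebook', and a plain pattern only needs to occur somewhere in the name.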
def recursively_load_weights(orig_dict , hf_model , model_name ):
    unused_weights = []
    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f'''Unsupported model: {model_name}''' )
    for name, value in orig_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(f'''{name} was ignored''' )
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix , suffix = key.split('.*.' )
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith('embed' ) and name.endswith('embed_avg' ):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key )[0].split('.' )[-2]
                    mapped_key = mapped_key.replace('*' , layer_index )
                if "weight_g" in name:
                    weight_type = 'weight_g'
                elif "weight_v" in name:
                    weight_type = 'weight_v'
                elif "weight_ih_l0" in name:
                    weight_type = 'weight_ih_l0'
                elif "weight_hh_l0" in name:
                    weight_type = 'weight_hh_l0'
                elif "bias_ih_l0" in name:
                    weight_type = 'bias_ih_l0'
                elif "bias_hh_l0" in name:
                    weight_type = 'bias_hh_l0'
                elif "weight_ih_l1" in name:
                    weight_type = 'weight_ih_l1'
                elif "weight_hh_l1" in name:
                    weight_type = 'weight_hh_l1'
                elif "bias_ih_l1" in name:
                    weight_type = 'bias_ih_l1'
                elif "bias_hh_l1" in name:
                    weight_type = 'bias_hh_l1'
                elif "bias" in name:
                    weight_type = 'bias'
                elif "weight" in name:
                    weight_type = 'weight'
                elif "running_mean" in name:
                    weight_type = 'running_mean'
                elif "running_var" in name:
                    weight_type = 'running_var'
                elif "num_batches_tracked" in name:
                    weight_type = 'num_batches_tracked'
                else:
                    weight_type = None
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def convert_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path )
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = 'time_group_norm'
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f'''Unknown model name: {model_name}''' )
    model = EncodecModel(config )
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
    original_checkpoint = torch.load(checkpoint_path )
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint['best_state']
    recursively_load_weights(original_checkpoint , model , model_name )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('Pushing to the hub...' )
        feature_extractor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser =argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args =parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 4 | 1 |
'''simple docstring'''
def heaps(arr: list ) -> list:
    """Return all permutations of ``arr`` using Heap's algorithm."""
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []
    def generate(k: int , arr: list ):
        if k == 1:
            res.append(tuple(arr[:] ) )
            return
        generate(k - 1 , arr )
        for i in range(k - 1 ):
            if k % 2 == 0:  # k is even
                arr[i] , arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0] , arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1 , arr )
    generate(len(arr ) , arr )
    return res
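# Worked example: heaps([1, 2, 3]) yields all 3! = 6 orderings, each reached from the previous
# one by a single swap: (1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1).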
if __name__ == "__main__":
    user_input =input("""Enter numbers separated by a comma:\n""").strip()
    arr =[int(item) for item in user_input.split(""",""")]
print(heaps(arr))
| 4 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__snake_case =logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor ):
    def __init__(self , *args , **kwargs ) -> None:
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 4 | 1 |
'''simple docstring'''
__snake_case =frozenset(
[
"""prompt""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
__snake_case =frozenset(["""prompt""", """negative_prompt"""])
__snake_case =frozenset([])
__snake_case =frozenset(["""image"""])
__snake_case =frozenset(
[
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
__snake_case =frozenset(["""image"""])
__snake_case =frozenset(
[
"""prompt""",
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
__snake_case =frozenset(["""prompt""", """image""", """negative_prompt"""])
__snake_case =frozenset(
[
# Text guided image variation with an image mask
"""prompt""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
__snake_case =frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
__snake_case =frozenset(
[
# image variation with an image mask
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
__snake_case =frozenset(["""image""", """mask_image"""])
__snake_case =frozenset(
[
"""example_image""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
__snake_case =frozenset(["""example_image""", """image""", """mask_image"""])
__snake_case =frozenset(["""class_labels"""])
__snake_case =frozenset(["""class_labels"""])
__snake_case =frozenset(["""batch_size"""])
__snake_case =frozenset([])
__snake_case =frozenset(["""batch_size"""])
__snake_case =frozenset([])
__snake_case =frozenset(
[
"""prompt""",
"""audio_length_in_s""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
__snake_case =frozenset(["""prompt""", """negative_prompt"""])
__snake_case =frozenset(["""input_tokens"""])
__snake_case =frozenset(["""input_tokens"""])
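# These frozensets parameterize shared pipeline tests: each larger set lists every keyword
# argument a pipeline family accepts, and the smaller companion set the arguments that vary per
# batch element. Upstream these carry names like TEXT_TO_IMAGE_PARAMS / TEXT_TO_IMAGE_BATCH_PARAMS;
# the assignment targets were lost in this dump, so treat those names as an assumption.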
| 4 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__snake_case =logging.get_logger(__name__)
VOCAB_FILES_NAMES ={
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP ={
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES ={
"""facebook/blenderbot_small-90M""": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__(self , vocab_file=None , merges_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
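# Sketch of the two helpers above: build_inputs_with_special_tokens wraps one sequence as
# <bos> tokens <eos> and appends a pair as ... <eos> tokens <eos>, while the token type ids are
# all zeros for both segments because this tokenizer does not use segment embeddings.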
| 4 | 1 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case =logging.get_logger(__name__)
def get_maskformer_config(model_name: str ):
    backbone_config = SwinConfig.from_pretrained(
        'microsoft/swin-tiny-patch4-window7-224' , out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
    config = MaskFormerConfig(backbone_config=backbone_config )
    repo_id = 'huggingface/label-files'
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = 'maskformer-ade20k-full-id2label.json'
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = 'ade20k-id2label.json'
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = 'maskformer-coco-stuff-id2label.json'
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = 'coco-panoptic-id2label.json'
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = 'cityscapes-id2label.json'
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = 'mapillary-vistas-id2label.json'
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    return config
def create_rename_keys(config ):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v(state_dict , backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' )
            in_proj_bias = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[: dim]
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[
                -dim :, :
            ]
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v(state_dict , config ):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight'''] = in_proj_weight[: hidden_size, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias'''] = in_proj_bias[:config.hidden_size]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight'''] = in_proj_weight[-hidden_size :, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias'''] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight'''] = in_proj_weight[: hidden_size, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias'''] = in_proj_bias[:config.hidden_size]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight'''] = in_proj_weight[-hidden_size :, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias'''] = in_proj_bias[-hidden_size :]
    # fmt: on
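# Sketch of the slicing in the two readers above: the original checkpoints store q/k/v as one
# fused in_proj tensor of shape (3 * hidden, hidden); rows [0:h], [h:2h] and [2h:3h] become the
# separate query, key and value projections that the HF attention modules expect.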
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str , checkpoint_path: str , pytorch_dump_folder_path: str , push_to_hub: bool = False ):
    config = get_maskformer_config(model_name )
    # load original state_dict
    with open(checkpoint_path , 'rb' ) as f:
        data = pickle.load(f )
    state_dict = data['model']
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
    model.eval()
    for name, param in model.named_parameters():
        print(name , param.shape )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, f'''Unexpected keys: {unexpected_keys}'''
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if 'ade' in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index , reduce_labels=reduce_labels )
    inputs = image_processor(image , return_tensors='pt' )
    outputs = model(**inputs )
    print('Logits:' , outputs.class_queries_logits[0, :3, :3] )
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6_353, -4.4_770, -2.6_065], [0.5_081, -4.2_394, -3.5_343], [2.1_909, -5.0_353, -1.9_323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1e-4 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('Pushing model and image processor to the hub...' )
        model.push_to_hub(f'''nielsr/{model_name}''' )
        image_processor.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
    parser =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help=("""Name of the MaskFormer model you'd like to convert""",),
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args =parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 4 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig ):
    model_type = '''speech_to_text_2'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''decoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__(self , vocab_size=1_0_0_0_0 , decoder_layers=6 , decoder_ffn_dim=2_0_4_8 , decoder_attention_heads=4 , decoder_layerdrop=0.0 , use_cache=True , activation_function="relu" , d_model=2_5_6 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_target_positions=1_0_2_4 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 4 | 1 |
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int ) -> str:
    if not isinstance(precision , int ):
        raise TypeError('Undefined for non-integers' )
    elif precision < 1:
        raise ValueError('Undefined for non-natural numbers' )
    # set the working Decimal precision and the number of Chudnovsky iterations
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 426880 * Decimal(10005 ).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
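# Each Chudnovsky term adds roughly 14.18 correct digits, hence the ceil(precision / 14)
# iteration count; e.g. pi(10) evaluates to '3.14159265' (the [:-1] slice drops the final,
# possibly mis-rounded, digit).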
if __name__ == "__main__":
    n =50
print(F'''The first {n} digits of pi is: {pi(n)}''')
| 4 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list ):
    def __lt__(self , other ):
        return self[-1] < other[-1]
    def __eq__(self , other ):
        return self[-1] == other[-1]
def patience_sort(collection: list ) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element] )
        i = bisect_left(stacks , new_stacks )
        if i != len(stacks ):
            stacks[i].append(element )
        else:
            stacks.append(new_stacks )
    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack ) for stack in stacks) )
    return collection
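# Worked example: for [5, 1, 4, 2, 3] the loop builds piles [5, 1], [4, 2], [3] (each element
# lands on the leftmost pile whose top is >= it, located with bisect_left), and merging the
# reversed piles yields [1, 2, 3, 4, 5].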
if __name__ == "__main__":
    user_input =input("""Enter numbers separated by a comma:\n""").strip()
    unsorted =[int(item) for item in user_input.split(""",""")]
print(patience_sort(unsorted))
| 4 | 1 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
__snake_case ="""docs/source/en/_toctree.yml"""
def a_ ( lowerCamelCase : Any ):
lowerCAmelCase = defaultdict(lowerCamelCase )
for doc in model_doc:
counts[doc["local"]] += 1
lowerCAmelCase = [key for key, value in counts.items() if value > 1]
lowerCAmelCase = []
for duplicate_key in duplicates:
lowerCAmelCase = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(lowerCamelCase ) > 1:
raise ValueError(
f'''{duplicate_key} is present several times in the documentation table of content at '''
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
return sorted(lowerCamelCase , key=lambda lowerCamelCase : s["title"].lower() )
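# Example of the dedup behaviour (hypothetical toc entries, hand-traced):
# >>> clean_model_doc_toc([{'local': 'bert', 'title': 'BERT'},
# ...                      {'local': 'bert', 'title': 'BERT'}])
# [{'local': 'bert', 'title': 'BERT'}]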
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding='utf-8') as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]['title'] != 'API':
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]['title'] != 'Models':
        model_idx += 1
    model_doc = api_doc[model_idx]['sections']
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if 'sections' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['sections']
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]['sections'] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]['sections'] = model_doc
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
| 4 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__snake_case ="""\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__snake_case ="""\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__snake_case ="""
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string', id='token'), id='sequence'),
                    'references': datasets.Sequence(
                        datasets.Sequence(datasets.Value('string', id='token'), id='sequence'), id='references'),
                } ), codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'], reference_urls=[
                'https://en.wikipedia.org/wiki/BLEU',
                'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
            ], )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth)
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
| 4 | 1 |
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '
        'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.',
        FutureWarning,
    )
| 4 |
'''simple docstring'''
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__snake_case ="""\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
__snake_case ="""\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
__snake_case ="""
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())
def acc_and_fa(preds, labels, fa_avg='binary'):
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds, average=fa_avg))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def evaluate_multirc(ids_preds, labels):
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred['prediction']
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    fas, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        fa = fa_score(y_true=question_labels, y_pred=question_preds, average='macro')
        fas.append(fa)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    fa_m = float(sum(fas) / len(fas))
    em = sum(ems) / len(ems)
    fa_a = float(fa_score(y_true=labels, y_pred=[id_pred['prediction'] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_fa(predictions, references, fa_avg='macro')
        elif self.config_name == "record":
            dataset = [
                {
                    'qas': [
                        {'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(references, predictions)}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
| 4 | 1 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--repo_path""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    args = parser.parse_args()
    config_parameters_to_change = {
"""image_size""": """sample_size""",
"""num_res_blocks""": """layers_per_block""",
"""block_channels""": """block_out_channels""",
"""down_blocks""": """down_block_types""",
"""up_blocks""": """up_block_types""",
"""downscale_freq_shift""": """freq_shift""",
"""resnet_num_groups""": """norm_num_groups""",
"""resnet_act_fn""": """act_fn""",
"""resnet_eps""": """norm_eps""",
"""num_head_channels""": """attention_head_dim""",
}
    key_parameters_to_change = {
"""time_steps""": """time_proj""",
"""mid""": """mid_block""",
"""downsample_blocks""": """down_blocks""",
"""upsample_blocks""": """up_blocks""",
}
__snake_case ="""""" if has_file(args.repo_path, """config.json""") else """unet"""
with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
__snake_case =reader.read()
__snake_case =json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, """config.json"""):
        model = UNetaDModel(**config)
else:
        class_name = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel
        model = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
    config = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
                config[value] = config[key]
                del config[key]
        config['down_block_types'] = [k.replace('UNetRes', '') for k in config['down_block_types']]
        config['up_block_types'] = [k.replace('UNetRes', '') for k in config['up_block_types']]
if do_only_weights:
    state_dict = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
    new_state_dict = {}
    for param_key, param_value in state_dict.items():
        if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
            continue
        has_changed = False
        for key, new_key in key_parameters_to_change.items():
            if not has_changed and param_key.split('.')[0] == key:
                new_state_dict['.'.join([new_key] + param_key.split('.')[1:])] = param_value
                has_changed = True
        if not has_changed:
            new_state_dict[param_key] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 4 |
'''simple docstring'''
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
| 4 | 1 |
'''simple docstring'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))
def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )
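# Hand check of the invariant: 4150 == 4**5 + 1**5 + 5**5 + 0**5, so
# digits_fifth_powers_sum(4150) == 4150 and 4150 is counted by solution().
# The published Project Euler 30 answer this should produce is 443839.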
if __name__ == "__main__":
print(solution())
| 4 |
'''simple docstring'''
import os
__snake_case ={"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1_000}
def a_ ( lowerCamelCase : str ):
lowerCAmelCase = 0
lowerCAmelCase = 0
while index < len(lowerCamelCase ) - 1:
lowerCAmelCase = SYMBOLS[numerals[index]]
lowerCAmelCase = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
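# Doctest-style examples (hand-checked against the subtractive rule above):
# >>> parse_roman_numerals('XIV')
# 14
# >>> parse_roman_numerals('MCMXC')
# 1990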
def generate_roman_numerals(num: int) -> str:
    numerals = ''
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
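# Round-trip example (hand-checked, illustrative only): the generator always
# emits the canonical minimal form, which is what the savings count relies on.
# >>> generate_roman_numerals(1990)
# 'MCMXC'
# >>> generate_roman_numerals(parse_roman_numerals('IIII'))
# 'IV'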
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 4 | 1 |
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self) -> None:
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()
    def test_str(self) -> None:
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), '(0,0,0,0,0,1)')
    def test_size(self) -> None:
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)
    def test_euclidean_length(self) -> None:
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)
    def test_add(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)
    def test_sub(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)
    def test_mul(self) -> None:
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), '(3.0,6.0,9.0)')
        self.assertEqual((a * b), 0)
    def test_zero_vector(self) -> None:
        self.assertEqual(str(zero_vector(10)).count('0'), 10)
    def test_unit_basis_vector(self) -> None:
        self.assertEqual(str(unit_basis_vector(3, 1)), '(0,1,0)')
    def test_axpy(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), '(3,4,7)')
    def test_copy(self) -> None:
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))
    def test_change_component(self) -> None:
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), '(0,1,0)')
    def test_str_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n', str(a))
    def test_minor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))
    def test_cofactor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))
    def test_determinant(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())
    def test_mul_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual('(14,32,50)', str(a * x))
        self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n', str(a * 2))
    def test_change_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n', str(a))
    def test_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)
    def test_add_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n', str(a + b))
    def test_sub_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n', str(a - b))
    def test_square_zero_matrix(self) -> None:
        self.assertEqual(
            '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n', str(square_zero_matrix(5)), )
if __name__ == "__main__":
unittest.main()
| 4 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor
    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors='np').input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1E-3))
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))
@require_torch
    def test_double_precision_pad(self):
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_values': inputs}], return_tensors='np')
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{'input_values': inputs}], return_tensors='pt')
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation')
        # automatic decoding with librispeech
        speech_samples = ds.sort('id').select(range(num_samples))[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
@require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors='pt').input_values
        self.assertEquals(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1E-4))
| 4 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f'Accelerated optimizer pickling failed with {e}')
        AcceleratorState._reset_state()
| 4 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
    'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
    'tokenizer_config_file': {
        'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot-3B': 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])
            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)
    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(' ' + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = ' '.join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.')
        return input_ids
| 4 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_lilt""": ["""LILT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LiltConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_lilt'] = [
"""LILT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LiltForQuestionAnswering""",
"""LiltForSequenceClassification""",
"""LiltForTokenClassification""",
"""LiltModel""",
"""LiltPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
__snake_case =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 4 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to waiting_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # a process whose arrival time has passed
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
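# Worked example (hand-computed for the data used below): with burst times
# [2, 5, 3, 7] all arriving at t=0, jobs complete in burst order 2, 3, 5, 7,
# giving waiting times [0, 5, 2, 10] and turnaround times [2, 10, 5, 17].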
if __name__ == "__main__":
print("""[TEST CASE 01]""")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
F'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(F'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(F'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 4 | 1 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = 'RegNetConfig'
# Base docstring
_CHECKPOINT_FOR_DOC = 'facebook/regnet-y-040'
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'facebook/regnet-y-040'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'
REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'facebook/regnet-y-040',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, activation: Optional[str] = 'relu'):
        super().__init__()
        self.convolution = nn.Convad(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=groups, bias=False, )
        self.normalization = nn.BatchNormad(out_channels)
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()
    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act)
        self.num_channels = config.num_channels
    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Convad(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNormad(out_channels)
    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPoolad((1, 1))
        self.attention = nn.Sequential(
            nn.Convad(in_channels, reduced_channels, kernel_size=1), nn.ReLU(), nn.Convad(reduced_channels, in_channels, kernel_size=1), nn.Sigmoid(), )
    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
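# Squeeze-and-excitation in brief (descriptive note, not from the source): the
# (b, c, h, w) input is average-pooled to (b, c, 1, 1), squeezed through a 1x1
# convolution bottleneck with ReLU, expanded back to c channels, and passed
# through a sigmoid; the result re-weights each channel of the feature map.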
class RegNetXLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None), )
        self.activation = ACTaFN[config.hidden_act]
    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act), RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))), RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None), )
        self.activation = ACTaFN[config.hidden_act]
    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(
                config, in_channels, out_channels, stride=stride, ), *[layer(config, out_channels, out_channels) for _ in range(depth - 1)], )
    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], ) )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))
    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
__snake_case =R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
__snake_case =R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
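# Minimal usage sketch (the checkpoint id is illustrative; any RegNet checkpoint works):
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = RegNetModel.from_pretrained("facebook/regnet-y-040")
#   outputs = model(**processor(images=image, return_tensors="pt"))
#   outputs.pooler_output.shape  # (batch_size, hidden_size, 1, 1) from the adaptive pooling above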
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
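# Note: when labels are passed and config.problem_type is unset, the head above infers it once
# from num_labels and the label dtype (regression / single-label / multi-label), the same lazy
# inference the other transformers classification heads use; set config.problem_type explicitly
# to bypass it.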
| 4 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_dpmpp_2m")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
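# Note: the names passed to set_scheduler above ("sample_euler", "sample_dpmpp_2m") select
# k-diffusion sampler functions, and use_karras_sigmas=True switches the noise schedule to the
# Karras et al. (2022) sigma spacing.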
| 4 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
__snake_case ="""
Human: <<task>>
Assistant: """
__snake_case ="""huggingface-tools/default-prompts"""
__snake_case ={"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
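# Minimal usage sketch (the agent name is illustrative): a prompt containing whitespace is
# returned verbatim, anything else is treated as a Hub dataset repo id to download from.
#   template = download_prompt(None, "my-agent", mode="chat")   # default chat template
#   custom = download_prompt("Human: <<task>>", "my-agent")     # has spaces -> returned as-is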
| 4 |
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
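# Illustrative sketch (not part of the original script): for a dummy checkpoint-v2 QKV weight
# with 2 heads of size 4, the re-ordering permutes rows but preserves the overall shape; all
# sizes here are made up for the demonstration.
def _demo_fix_ordering():
    num_heads, hidden_size_per_head, num_splits = 2, 4, 3
    rows = num_splits * num_heads * hidden_size_per_head
    dummy = torch.arange(rows * 5, dtype=torch.float32).view(rows, 5)
    fixed = fix_query_key_value_ordering(dummy, 2.0, num_splits, num_heads, hidden_size_per_head)
    assert fixed.shape == dummy.shape  # layout changes, shape does not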
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For the LM head, transformers wants the weights tied to the word embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)"
    )
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model."
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257, n_positions=1024, n_embd=1024, n_layer=24, n_head=16, n_inner=4096, activation_function=activation_function, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
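# Example invocation (the checkpoint path is hypothetical):
#   python convert_megatron_gpt2_checkpoint.py --print-checkpoint-structure \
#       /path/to/megatron/iter_0500000/mp_rank_00/model_optim_rng.pt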
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 4 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 4 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indices(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
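    # Note on the method above: it implements the Sherman-Morrison identity
    #     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
    # with `self` playing the role of the known inverse A^(-1), so the rank-one update costs
    # only matrix products; the zero check rejects the singular case 1 + v^T A^(-1) u == 0.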
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
| 4 | 1 |
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
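# Example invocation (paths are illustrative):
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin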
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__snake_case =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 4 |
'''simple docstring'''
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
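# Minimal usage sketch (values are illustrative): after the O(n) prefix pass, range sums are
# O(1) lookups, and `contains_sum` finds a contiguous subarray with a given total in one scan
# over the prefix sums.
def _demo_prefix_sum():
    ps = PrefixSum([1, 2, 3, 4])
    assert ps.get_sum(1, 3) == 9  # 2 + 3 + 4
    assert ps.contains_sum(7)  # the subarray [3, 4]
    assert not ps.contains_sum(100)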
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
lowerCAmelCase = {
'do_resize': True,
'size': 2_0,
'do_center_crop': True,
'crop_size': 1_8,
'do_normalize': True,
'image_mean': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'image_std': [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowerCAmelCase = os.path.join(self.tmpdirname , UpperCAmelCase__ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Tuple , **UpperCAmelCase__ : Optional[int] ) -> Tuple:
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[Any] , **UpperCAmelCase__ : Optional[int] ) -> Any:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : int , **UpperCAmelCase__ : int ) -> Union[str, Any]:
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : int ) -> Tuple:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
lowerCAmelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowerCAmelCase = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self : Dict ) -> List[str]:
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = AlignProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase__ )
lowerCAmelCase = AlignProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase__ )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase__ )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] ) -> str:
lowerCAmelCase = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCAmelCase = self.get_image_processor(do_normalize=UpperCAmelCase__ , padding_value=1.0 )
lowerCAmelCase = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = AlignProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = image_processor(UpperCAmelCase__ , return_tensors='np' )
lowerCAmelCase = processor(images=UpperCAmelCase__ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = AlignProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
lowerCAmelCase = 'lower newer'
lowerCAmelCase = processor(text=UpperCAmelCase__ )
lowerCAmelCase = tokenizer(UpperCAmelCase__ , padding='max_length' , max_length=6_4 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = AlignProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
lowerCAmelCase = 'lower newer'
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=UpperCAmelCase__ , images=UpperCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase__ ):
processor()
def __UpperCAmelCase ( self : List[str] ) -> Dict:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = AlignProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase = processor.batch_decode(UpperCAmelCase__ )
lowerCAmelCase = tokenizer.batch_decode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __UpperCAmelCase ( self : str ) -> Tuple:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = AlignProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
lowerCAmelCase = 'lower newer'
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=UpperCAmelCase__ , images=UpperCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 4 |
'''simple docstring'''
def get_demo_graph(index: int) -> dict[int, list[int]]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
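# Minimal usage sketch: on the first demo graph above, exactly the edges (2, 3), (3, 4) and
# (2, 5) disconnect the graph when removed; the cycles 0-1-2 and 5-6-7-8 contain no bridges.
def _demo_bridges():
    assert set(compute_bridges(get_demo_graph(0))) == {(2, 3), (3, 4), (2, 5)}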
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
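# Note: encode() returns the continuous pre-quantization latents; snapping to the nearest
# codebook vector happens inside decode() (unless force_not_quantize=True), so forward() is
# effectively encode -> quantize -> decode.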
| 4 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case =logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
__snake_case =parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 4 | 1 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def heun(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # predictor step (explicit Euler)
        y_pred = y[k] + step_size * ode_func(x, y[k])
        # corrector step (trapezoidal average of the slopes at both ends)
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_pred))
        )
        x += step_size

    return y
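# Minimal usage sketch: integrating y' = y from x = 0 with y(0) = 1 should track e^x;
# Heun's method is second order, so halving the step size cuts the error roughly 4x.
def _demo_heun():
    y = heun(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    assert abs(y[-1] - np.e) < 1e-3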
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 4 | 1 |
'''simple docstring'''
def a_ ( lowerCamelCase : str , lowerCamelCase : str ):
def get_matched_characters(lowerCamelCase : str , lowerCamelCase : str ) -> str:
lowerCAmelCase = []
lowerCAmelCase = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
lowerCAmelCase = int(max(0 , i - limit ) )
lowerCAmelCase = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(lowerCamelCase )
lowerCAmelCase = f'''{_stra[0:_stra.index(lowerCamelCase )]} {_stra[_stra.index(lowerCamelCase ) + 1:]}'''
return "".join(lowerCamelCase )
# matching characters
lowerCAmelCase = get_matched_characters(lowerCamelCase , lowerCamelCase )
lowerCAmelCase = get_matched_characters(lowerCamelCase , lowerCamelCase )
lowerCAmelCase = len(lowerCamelCase )
# transposition
lowerCAmelCase = (
len([(ca, ca) for ca, ca in zip(lowerCamelCase , lowerCamelCase ) if ca != ca] ) // 2
)
if not match_count:
lowerCAmelCase = 0.0
else:
lowerCAmelCase = (
1
/ 3
* (
match_count / len(lowerCamelCase )
+ match_count / len(lowerCamelCase )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
lowerCAmelCase = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
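# Worked example, traced by hand from the definition above (not an original doctest):
# for "hello" vs "world" only "l" matches within the search window, so
# jaro = (1/5 + 1/5 + 1/1) / 3 ~= 0.4667, and with no common prefix the final
# Jaro-Winkler score stays at ~0.4667.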
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
| 4 |
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def a_ ( lowerCamelCase : Dict ):
lowerCAmelCase = {}
lowerCAmelCase = tokenizer(example['content'] , truncation=lowerCamelCase )['input_ids']
lowerCAmelCase = len(example['content'] ) / len(output['input_ids'] )
return output
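# Illustration of the map output (a sketch; the exact ids depend on the tokenizer
# loaded via --tokenizer_dir): for example = {"content": "def f(): pass"} the function
# returns truncated "input_ids" plus "ratio_char_token", the number of source
# characters per produced token, a rough measure of tokenizer efficiency on that file.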
__snake_case =HfArgumentParser(PretokenizationArguments)
__snake_case =parser.parse_args()
if args.num_workers is None:
__snake_case =multiprocessing.cpu_count()
__snake_case =AutoTokenizer.from_pretrained(args.tokenizer_dir)
__snake_case =time.time()
__snake_case =load_dataset(args.dataset_name, split="""train""")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
__snake_case =time.time()
__snake_case =ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
__snake_case =time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 4 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def a_ ( lowerCamelCase : Dict ):
lowerCAmelCase = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
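# e.g. (hypothetical tensors): a_([torch.zeros(2, 3), torch.ones(2, 3)]) is True,
# while mixing a (2, 3) tensor with a (3, 2) tensor returns False.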
class UpperCAmelCase_ ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
lowerCamelCase : Tuple = StableDiffusionLatentUpscalePipeline
lowerCamelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''height''',
'''width''',
'''cross_attention_kwargs''',
'''negative_prompt_embeds''',
'''prompt_embeds''',
}
lowerCamelCase : List[Any] = PipelineTesterMixin.required_optional_params - {'''num_images_per_prompt'''}
lowerCamelCase : Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCamelCase : Any = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCamelCase : List[str] = frozenset([] )
lowerCamelCase : Optional[int] = True
@property
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
lowerCAmelCase = 1
lowerCAmelCase = 4
lowerCAmelCase = (1_6, 1_6)
lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCAmelCase__ )
return image
def __UpperCAmelCase ( self : Dict ) -> List[Any]:
torch.manual_seed(0 )
lowerCAmelCase = UNetaDConditionModel(
act_fn='gelu' , attention_head_dim=8 , norm_num_groups=UpperCAmelCase__ , block_out_channels=[3_2, 3_2, 6_4, 6_4] , time_cond_proj_dim=1_6_0 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=3_2 , down_block_types=(
'KDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
) , in_channels=8 , mid_block_type=UpperCAmelCase__ , only_cross_attention=UpperCAmelCase__ , out_channels=5 , resnet_time_scale_shift='scale_shift' , time_embedding_type='fourier' , timestep_post_act='gelu' , up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D') , )
lowerCAmelCase = AutoencoderKL(
block_out_channels=[3_2, 3_2, 6_4, 6_4] , in_channels=3 , out_channels=3 , down_block_types=[
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
lowerCAmelCase = EulerDiscreteScheduler(prediction_type='sample' )
lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='quick_gelu' , projection_dim=5_1_2 , )
lowerCAmelCase = CLIPTextModel(UpperCAmelCase__ )
lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase = {
'unet': model.eval(),
'vae': vae.eval(),
'scheduler': scheduler,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any]=0 ) -> Union[str, Any]:
if str(UpperCAmelCase__ ).startswith('mps' ):
lowerCAmelCase = torch.manual_seed(UpperCAmelCase__ )
else:
lowerCAmelCase = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
lowerCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': self.dummy_image.cpu(),
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __UpperCAmelCase ( self : List[Any] ) -> Any:
lowerCAmelCase = 'cpu'
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = self.pipeline_class(**UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowerCAmelCase = self.get_dummy_inputs(UpperCAmelCase__ )
lowerCAmelCase = pipe(**UpperCAmelCase__ ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_5_6, 2_5_6, 3) )
lowerCAmelCase = np.array(
[0.47_222_412, 0.41_921_633, 0.44_717_434, 0.46_874_192, 0.42_588_258, 0.46_150_726, 0.4_677_534, 0.45_583_832, 0.48_579_055] )
lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase__ , 1E-3 )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def __UpperCAmelCase ( self : Tuple ) -> str:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def __UpperCAmelCase ( self : str ) -> int:
super().test_save_load_local(expected_max_difference=3E-3 )
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
lowerCAmelCase = [
'DDIMScheduler',
'DDPMScheduler',
'PNDMScheduler',
'HeunDiscreteScheduler',
'EulerAncestralDiscreteScheduler',
'KDPM2DiscreteScheduler',
'KDPM2AncestralDiscreteScheduler',
'DPMSolverSDEScheduler',
]
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = self.pipeline_class(**UpperCAmelCase__ )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowerCAmelCase = self.get_dummy_inputs(UpperCAmelCase__ )
lowerCAmelCase = 2
lowerCAmelCase = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # skip schedulers that are incompatible with this pipeline
                # (e.g. schedulers without sigma support)
continue
lowerCAmelCase = getattr(UpperCAmelCase__ , scheduler_enum.name )
lowerCAmelCase = scheduler_cls.from_config(pipe.scheduler.config )
lowerCAmelCase = pipe(**UpperCAmelCase__ )[0]
outputs.append(UpperCAmelCase__ )
assert check_same_shape(UpperCAmelCase__ )
@require_torch_gpu
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : Dict ) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : int ) -> int:
lowerCAmelCase = torch.manual_seed(3_3 )
lowerCAmelCase = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' , torch_dtype=torch.floataa )
pipe.to('cuda' )
lowerCAmelCase = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa )
upscaler.to('cuda' )
lowerCAmelCase = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
lowerCAmelCase = pipe(UpperCAmelCase__ , generator=UpperCAmelCase__ , output_type='latent' ).images
lowerCAmelCase = upscaler(
prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , num_inference_steps=2_0 , guidance_scale=0 , generator=UpperCAmelCase__ , output_type='np' , ).images[0]
lowerCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy' )
assert np.abs((expected_image - image).mean() ) < 5E-2
def __UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
lowerCAmelCase = torch.manual_seed(3_3 )
lowerCAmelCase = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa )
upscaler.to('cuda' )
lowerCAmelCase = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png' )
lowerCAmelCase = upscaler(
prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , num_inference_steps=2_0 , guidance_scale=0 , generator=UpperCAmelCase__ , output_type='np' , ).images[0]
lowerCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy' )
assert np.abs((expected_image - image).max() ) < 5E-2
| 4 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case =logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : bool = field(default=__lowercase , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
lowerCamelCase : bool = field(
default=__lowercase , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
lowerCamelCase : Optional[int] = field(
default=__lowercase , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
lowerCamelCase : Optional[int] = field(
default=__lowercase , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
lowerCamelCase : Optional[Union[str, Path, GenerationConfig]] = field(
default=__lowercase , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
lowerCAmelCase = super().to_dict()
for k, v in d.items():
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase = v.to_dict()
return d
| 4 | 1 |
'''simple docstring'''
import heapq
def a_ ( lowerCamelCase : dict ):
lowerCAmelCase = []
    # for each node, push it together with its adjacency list and its rank onto the
    # queue; heapq provides a min-priority queue, so -1 * len(v) is used as the key
    # to simulate a max-priority queue ordered by vertex degree
for key, value in graph.items():
# O(log(n))
heapq.heappush(lowerCamelCase , [-1 * len(lowerCamelCase ), (key, value)] )
# chosen_vertices = set of chosen vertices
lowerCAmelCase = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
lowerCAmelCase = heapq.heappop(lowerCamelCase )[1][0]
chosen_vertices.add(lowerCamelCase )
# Remove all arcs adjacent to argmax
for elem in queue:
            # if the vertex has no adjacent nodes left, skip it
if elem[0] == 0:
continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
lowerCAmelCase = elem[1][1].index(lowerCamelCase )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(lowerCamelCase )
return chosen_vertices
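# Worked example, traced by hand through the loop above on the module-level graph
# defined below: vertex 2 (degree 3) is popped first, then 0, 1 and finally 4,
# yielding the cover {0, 1, 2, 4}.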
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case ={0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
| 4 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__snake_case =logging.get_logger("""transformers.models.encodec""")
__snake_case ={
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
__snake_case ={
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
__snake_case ={
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
__snake_case ={
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
__snake_case ={
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
__snake_case ={
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__snake_case ={
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__snake_case =[]
__snake_case =[]
def a_ ( lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : List[str] ):
for attribute in key.split('.' ):
lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
if weight_type is not None:
lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase ).shape
else:
lowerCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowerCAmelCase = value
elif weight_type == "weight_g":
lowerCAmelCase = value
elif weight_type == "weight_v":
lowerCAmelCase = value
elif weight_type == "bias":
lowerCAmelCase = value
elif weight_type == "running_mean":
lowerCAmelCase = value
elif weight_type == "running_var":
lowerCAmelCase = value
elif weight_type == "num_batches_tracked":
lowerCAmelCase = value
elif weight_type == "weight_ih_l0":
lowerCAmelCase = value
elif weight_type == "weight_hh_l0":
lowerCAmelCase = value
elif weight_type == "bias_ih_l0":
lowerCAmelCase = value
elif weight_type == "bias_hh_l0":
lowerCAmelCase = value
elif weight_type == "weight_ih_l1":
lowerCAmelCase = value
elif weight_type == "weight_hh_l1":
lowerCAmelCase = value
elif weight_type == "bias_ih_l1":
lowerCAmelCase = value
elif weight_type == "bias_hh_l1":
lowerCAmelCase = value
else:
lowerCAmelCase = value
logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
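# Note on the attribute traversal above (an observation, not original code): getattr
# is applied one dotted segment at a time, so a key such as "encoder.layers.0.conv"
# resolves through the model's submodules; numeric segments work because
# nn.ModuleList exposes its children as string-named attributes.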
def a_ ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] ):
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
lowerCAmelCase , lowerCAmelCase = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
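# Pattern semantics (illustrative patterns, not taken from the ignore lists above):
# "decoder.*" ignores every key that starts with "decoder.", "layers.*.bias" ignores
# any key containing both "layers" and "bias" as substrings, and a plain pattern
# ignores any key that contains it.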
def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : Any , lowerCamelCase : str ):
lowerCAmelCase = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
lowerCAmelCase = MAPPING_24K
elif model_name == "encodec_48khz":
lowerCAmelCase = MAPPING_48K
else:
raise ValueError(f'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(lowerCamelCase , lowerCamelCase ):
logger.info(f'''{name} was ignored''' )
continue
lowerCAmelCase = False
for key, mapped_key in MAPPING.items():
if "*" in key:
lowerCAmelCase , lowerCAmelCase = key.split('.*.' )
if prefix in name and suffix in name:
lowerCAmelCase = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
lowerCAmelCase = True
if "*" in mapped_key:
lowerCAmelCase = name.split(lowerCamelCase )[0].split('.' )[-2]
lowerCAmelCase = mapped_key.replace('*' , lowerCamelCase )
if "weight_g" in name:
lowerCAmelCase = 'weight_g'
elif "weight_v" in name:
lowerCAmelCase = 'weight_v'
elif "weight_ih_l0" in name:
lowerCAmelCase = 'weight_ih_l0'
elif "weight_hh_l0" in name:
lowerCAmelCase = 'weight_hh_l0'
elif "bias_ih_l0" in name:
lowerCAmelCase = 'bias_ih_l0'
elif "bias_hh_l0" in name:
lowerCAmelCase = 'bias_hh_l0'
elif "weight_ih_l1" in name:
lowerCAmelCase = 'weight_ih_l1'
elif "weight_hh_l1" in name:
lowerCAmelCase = 'weight_hh_l1'
elif "bias_ih_l1" in name:
lowerCAmelCase = 'bias_ih_l1'
elif "bias_hh_l1" in name:
lowerCAmelCase = 'bias_hh_l1'
elif "bias" in name:
lowerCAmelCase = 'bias'
elif "weight" in name:
lowerCAmelCase = 'weight'
elif "running_mean" in name:
lowerCAmelCase = 'running_mean'
elif "running_var" in name:
lowerCAmelCase = 'running_var'
elif "num_batches_tracked" in name:
lowerCAmelCase = 'num_batches_tracked'
else:
lowerCAmelCase = None
set_recursively(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
continue
if not is_used:
unused_weights.append(lowerCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def a_ ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : Dict=None , lowerCamelCase : Union[str, Any]=None , ):
if config_path is not None:
lowerCAmelCase = EncodecConfig.from_pretrained(lowerCamelCase )
else:
lowerCAmelCase = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
lowerCAmelCase = [8, 5, 4, 4]
lowerCAmelCase = [2.2]
lowerCAmelCase = 64
lowerCAmelCase = 32000
lowerCAmelCase = 2048
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
elif model_name == "encodec_48khz":
lowerCAmelCase = [8, 5, 4, 2]
lowerCAmelCase = [3.0, 6.0, 12.0, 24.0]
lowerCAmelCase = 48000
lowerCAmelCase = 2
lowerCAmelCase = False
lowerCAmelCase = 'time_group_norm'
lowerCAmelCase = True
lowerCAmelCase = 1.0
lowerCAmelCase = 0.01
else:
raise ValueError(f'''Unknown model name: {model_name}''' )
lowerCAmelCase = EncodecModel(lowerCamelCase )
lowerCAmelCase = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(lowerCamelCase )
lowerCAmelCase = torch.load(lowerCamelCase )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
lowerCAmelCase = original_checkpoint['best_state']
recursively_load_weights(lowerCamelCase , lowerCamelCase , lowerCamelCase )
model.save_pretrained(lowerCamelCase )
if repo_id:
print('Pushing to the hub...' )
feature_extractor.push_to_hub(lowerCamelCase )
model.push_to_hub(lowerCamelCase )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__snake_case =parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 4 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
__snake_case =logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
lowerCamelCase : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowerCamelCase : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowerCamelCase : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
lowerCamelCase : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
lowerCamelCase : bool = field(
default=__lowercase , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
lowerCamelCase : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
lowerCamelCase : bool = field(
default=__lowercase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
@dataclass
class UpperCAmelCase_ :
lowerCamelCase : Optional[str] = field(default=__lowercase , metadata={'''help''': '''The input training data file (a text file).'''} )
lowerCamelCase : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
lowerCamelCase : bool = field(
default=__lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
lowerCamelCase : Optional[int] = field(
default=__lowercase , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
lowerCamelCase : Optional[int] = field(
default=__lowercase , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. If passed, sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
lowerCamelCase : bool = field(
default=__lowercase , metadata={
'''help''': (
'''Whether to pad all samples to the maximum sentence length. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
'''efficient on GPU but very bad for TPU.'''
)
} , )
lowerCamelCase : Optional[int] = field(
default=__lowercase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
lowerCamelCase : Optional[int] = field(
default=__lowercase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def __UpperCAmelCase ( self : List[str] ) -> str:
if self.train_file is not None:
lowerCAmelCase = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
lowerCAmelCase = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class UpperCAmelCase_ :
lowerCamelCase : PreTrainedTokenizerBase
lowerCamelCase : Union[bool, str, PaddingStrategy] = True
lowerCamelCase : Optional[int] = None
lowerCamelCase : Optional[int] = None
def __call__( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] ) -> str:
lowerCAmelCase = 'label' if 'label' in features[0].keys() else 'labels'
lowerCAmelCase = [feature.pop(UpperCAmelCase__ ) for feature in features]
lowerCAmelCase = len(UpperCAmelCase__ )
lowerCAmelCase = len(features[0]['input_ids'] )
lowerCAmelCase = [
[{k: v[i] for k, v in feature.items()} for i in range(UpperCAmelCase__ )] for feature in features
]
lowerCAmelCase = list(chain(*UpperCAmelCase__ ) )
lowerCAmelCase = self.tokenizer.pad(
UpperCAmelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
# Un-flatten
lowerCAmelCase = {k: v.view(UpperCAmelCase__ , UpperCAmelCase__ , -1 ) for k, v in batch.items()}
# Add back labels
lowerCAmelCase = torch.tensor(UpperCAmelCase__ , dtype=torch.intaa )
return batch
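# Shape flow through the collator above (sizes assumed for illustration): a batch of
# 2 features with 4 choices each is flattened to 8 sequences before padding, then
# reshaped back, so batch["input_ids"] ends up with shape (2, 4, max_seq_len) and
# batch["labels"] with shape (2,).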
def a_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , lowerCamelCase , lowerCamelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCAmelCase = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase )
datasets.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f''', distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
lowerCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
lowerCAmelCase = {}
if data_args.train_file is not None:
lowerCAmelCase = data_args.train_file
if data_args.validation_file is not None:
lowerCAmelCase = data_args.validation_file
lowerCAmelCase = data_args.train_file.split('.' )[-1]
lowerCAmelCase = load_dataset(
lowerCamelCase , data_files=lowerCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
lowerCAmelCase = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
lowerCAmelCase = [f'''ending{i}''' for i in range(4 )]
lowerCAmelCase = 'sent1'
lowerCAmelCase = 'sent2'
if data_args.max_seq_length is None:
lowerCAmelCase = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
lowerCAmelCase = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
lowerCAmelCase = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCamelCase : Optional[Any] ):
lowerCAmelCase = [[context] * 4 for context in examples[context_name]]
lowerCAmelCase = examples[question_header_name]
lowerCAmelCase = [
[f'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCamelCase )
]
# Flatten out
lowerCAmelCase = list(chain(*lowerCamelCase ) )
lowerCAmelCase = list(chain(*lowerCamelCase ) )
# Tokenize
lowerCAmelCase = tokenizer(
lowerCamelCase , lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase ) , 4 )] for k, v in tokenized_examples.items()}
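    # Shape note for the flattening above (sizes assumed for illustration): a batch of
    # 8 SWAG examples becomes 8 * 4 = 32 (context, ending) strings before tokenization,
    # and each tokenized field is then regrouped into chunks of 4 per original example.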
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
lowerCAmelCase = raw_datasets['train']
if data_args.max_train_samples is not None:
lowerCAmelCase = min(len(lowerCamelCase ) , data_args.max_train_samples )
lowerCAmelCase = train_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
lowerCAmelCase = train_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
lowerCAmelCase = raw_datasets['validation']
if data_args.max_eval_samples is not None:
lowerCAmelCase = min(len(lowerCamelCase ) , data_args.max_eval_samples )
lowerCAmelCase = eval_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
lowerCAmelCase = eval_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
lowerCAmelCase = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowerCamelCase : List[Any] ):
lowerCAmelCase , lowerCAmelCase = eval_predictions
lowerCAmelCase = np.argmax(lowerCamelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
lowerCAmelCase = Trainer(
model=lowerCamelCase , args=lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase , data_collator=lowerCamelCase , compute_metrics=lowerCamelCase , )
# Training
if training_args.do_train:
lowerCAmelCase = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase = last_checkpoint
lowerCAmelCase = trainer.train(resume_from_checkpoint=lowerCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCAmelCase = train_result.metrics
lowerCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase )
)
lowerCAmelCase = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics('train' , lowerCamelCase )
trainer.save_metrics('train' , lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowerCAmelCase = trainer.evaluate()
lowerCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase )
lowerCAmelCase = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics('eval' , lowerCamelCase )
trainer.save_metrics('eval' , lowerCamelCase )
lowerCAmelCase = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase )
else:
trainer.create_model_card(**lowerCamelCase )
def a_ ( lowerCamelCase : Optional[Any] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 4 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__snake_case =logging.get_logger(__name__)
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : Dict , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : List[str] ) -> None:
warnings.warn(
'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use FlavaImageProcessor instead.' , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 4 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ :
def __init__( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any=1_3 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Union[str, Any]=9_9 , UpperCAmelCase__ : Tuple=3_2 , UpperCAmelCase__ : int=5 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : Dict=3_7 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : int=5_1_2 , UpperCAmelCase__ : Tuple=1_6 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : Union[str, Any]=0.02 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : int=None , ) -> Optional[int]:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
def __UpperCAmelCase ( self : str ) -> int:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] ) -> Optional[int]:
lowerCAmelCase = NystromformerModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] ) -> Any:
lowerCAmelCase = NystromformerForMaskedLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] ) -> Optional[Any]:
lowerCAmelCase = NystromformerForQuestionAnswering(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple ) -> Union[str, Any]:
lowerCAmelCase = self.num_labels
lowerCAmelCase = NystromformerForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] ) -> Any:
lowerCAmelCase = self.num_labels
lowerCAmelCase = NystromformerForTokenClassification(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Any ) -> Dict:
lowerCAmelCase = self.num_choices
lowerCAmelCase = NystromformerForMultipleChoice(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
lowerCAmelCase = self.prepare_config_and_inputs()
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs
lowerCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
lowerCamelCase : int = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase : Any = (
{
'''feature-extraction''': NystromformerModel,
'''fill-mask''': NystromformerForMaskedLM,
'''question-answering''': NystromformerForQuestionAnswering,
'''text-classification''': NystromformerForSequenceClassification,
'''token-classification''': NystromformerForTokenClassification,
'''zero-shot''': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase : Tuple = False
lowerCamelCase : Optional[Any] = False
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
lowerCAmelCase = NystromformerModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : int ) -> List[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase = type
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[str] ) -> int:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : Any ) -> Dict:
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = NystromformerModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
lowerCAmelCase = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' )
lowerCAmelCase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
lowerCAmelCase = model(UpperCAmelCase__ )[0]
lowerCAmelCase = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape , UpperCAmelCase__ )
lowerCAmelCase = torch.tensor(
[[[-0.4_532, -0.0_936, 0.5_137], [-0.2_676, 0.0_628, 0.6_186], [-0.3_629, -0.1_726, 0.4_716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
lowerCAmelCase = 'the [MASK] of Belgium is Brussels'
lowerCAmelCase = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' )
lowerCAmelCase = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' )
lowerCAmelCase = tokenizer(UpperCAmelCase__ , return_tensors='pt' )
with torch.no_grad():
lowerCAmelCase = model(encoding.input_ids ).logits
lowerCAmelCase = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(UpperCAmelCase__ ) , 'capital' )
| 4 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
__snake_case ={
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
__snake_case ={
"""facebook/blenderbot_small-90M""": 512,
}
class UpperCAmelCase_ ( PreTrainedTokenizerFast ):
lowerCamelCase : Tuple = VOCAB_FILES_NAMES
lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = BlenderbotSmallTokenizer
    def __init__( self , vocab_file=None , merges_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) -> None:
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
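    # Sketch of the layouts produced above (derived from the two methods,
    # assuming the default special tokens):
    #   single sequence: <bos> x1 ... xn <eos>
    #   sequence pair:   <bos> x1 ... xn <eos> <eos> y1 ... ym <eos>
    # create_token_type_ids_from_sequences always returns zeros because this
    # tokenizer does not use token type ids.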
| 4 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class UpperCAmelCase_ ( BackboneConfigMixin , PretrainedConfig ):
lowerCamelCase : Any = '''resnet'''
lowerCamelCase : Optional[Any] = ['''basic''', '''bottleneck''']
def __init__( self : Optional[int] , UpperCAmelCase__ : Tuple=3 , UpperCAmelCase__ : int=6_4 , UpperCAmelCase__ : List[Any]=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , UpperCAmelCase__ : Dict=[3, 4, 6, 3] , UpperCAmelCase__ : List[Any]="bottleneck" , UpperCAmelCase__ : Dict="relu" , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Any=None , **UpperCAmelCase__ : List[str] , ) -> List[str]:
super().__init__(**UpperCAmelCase__ )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {','.join(self.layer_types )}''' )
lowerCAmelCase = num_channels
lowerCAmelCase = embedding_size
lowerCAmelCase = hidden_sizes
lowerCAmelCase = depths
lowerCAmelCase = layer_type
lowerCAmelCase = hidden_act
lowerCAmelCase = downsample_in_first_stage
lowerCAmelCase = ['stem'] + [F'''stage{idx}''' for idx in range(1 , len(UpperCAmelCase__ ) + 1 )]
lowerCAmelCase , lowerCAmelCase = get_aligned_output_features_output_indices(
out_features=UpperCAmelCase__ , out_indices=UpperCAmelCase__ , stage_names=self.stage_names )
class UpperCAmelCase_ ( OnnxConfig ):
lowerCamelCase : Any = version.parse('''1.11''' )
@property
def __UpperCAmelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __UpperCAmelCase ( self : Optional[Any] ) -> float:
return 1E-3
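# Illustration of the stage bookkeeping in the config's __init__ above
# (derived from the code, not new behavior): with the default depths
# [3, 4, 6, 3], stage_names becomes ['stem', 'stage1', 'stage2', 'stage3',
# 'stage4'], and out_features / out_indices are aligned against exactly
# those names.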
| 4 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class UpperCAmelCase_ ( PretrainedConfig ):
lowerCamelCase : Union[str, Any] = '''speech_to_text_2'''
lowerCamelCase : Any = ['''past_key_values''']
lowerCamelCase : Optional[Any] = {'''num_attention_heads''': '''decoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[Any]=1_0_0_0_0 , UpperCAmelCase__ : int=6 , UpperCAmelCase__ : Optional[Any]=2_0_4_8 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str="relu" , UpperCAmelCase__ : Any=2_5_6 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : int=1_0_2_4 , **UpperCAmelCase__ : Optional[Any] , ) -> Dict:
lowerCAmelCase = vocab_size
lowerCAmelCase = d_model
lowerCAmelCase = decoder_ffn_dim
lowerCAmelCase = decoder_layers
lowerCAmelCase = decoder_attention_heads
lowerCAmelCase = dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = activation_dropout
lowerCAmelCase = activation_function
lowerCAmelCase = init_std
lowerCAmelCase = decoder_layerdrop
lowerCAmelCase = use_cache
lowerCAmelCase = decoder_layers
lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase = max_target_positions
super().__init__(
pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
| 4 | 1 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester ( unittest.TestCase ):
def __init__( self : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str=1_0_0 , UpperCAmelCase__ : str=1_3 , UpperCAmelCase__ : Dict=3_0 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Union[str, Any]=3_2 , UpperCAmelCase__ : List[str]=5 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : List[str]=3_7 , UpperCAmelCase__ : Any="gelu" , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Optional[int]=1_0 , UpperCAmelCase__ : Union[str, Any]=0.02 , UpperCAmelCase__ : Tuple=3 , ) -> Dict:
lowerCAmelCase = parent
lowerCAmelCase = vocab_size
lowerCAmelCase = batch_size
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = is_training
lowerCAmelCase = use_labels
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase = (image_size // patch_size) ** 2
lowerCAmelCase = num_patches + 1
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = BeitConfig(
            vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int ) -> str:
lowerCAmelCase = FlaxBeitModel(config=UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] ) -> Any:
lowerCAmelCase = FlaxBeitForMaskedImageModeling(config=UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str ) -> Optional[int]:
lowerCAmelCase = self.type_sequence_label_size
lowerCAmelCase = FlaxBeitForImageClassification(config=UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase = 1
lowerCAmelCase = FlaxBeitForImageClassification(UpperCAmelCase__ )
lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase = model(UpperCAmelCase__ )
def __UpperCAmelCase ( self : Any ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( FlaxModelTesterMixin , unittest.TestCase ):
lowerCamelCase : List[str] = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def __UpperCAmelCase ( self : Any ) -> None:
lowerCAmelCase = FlaxBeitModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ , hidden_size=3_7 )
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(UpperCAmelCase__ )
lowerCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = model_class(UpperCAmelCase__ )
@jax.jit
def model_jitted(UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[int] ):
return model(pixel_values=UpperCAmelCase__ , **UpperCAmelCase__ )
with self.subTest('JIT Enabled' ):
lowerCAmelCase = model_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowerCAmelCase = model_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] ) -> str:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[str] ) -> Dict:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
lowerCAmelCase = model_class_name.from_pretrained('microsoft/beit-base-patch16-224' )
lowerCAmelCase = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
self.assertIsNotNone(UpperCAmelCase__ )
def prepare_img( ):
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_vision
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self : str ) -> str:
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self : Dict ) -> int:
lowerCAmelCase = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=UpperCAmelCase__ , return_tensors='np' ).pixel_values
# prepare bool_masked_pos
lowerCAmelCase = np.ones((1, 1_9_6) , dtype=UpperCAmelCase__ )
# forward pass
lowerCAmelCase = model(pixel_values=UpperCAmelCase__ , bool_masked_pos=UpperCAmelCase__ )
lowerCAmelCase = outputs.logits
# verify the logits
lowerCAmelCase = (1, 1_9_6, 8_1_9_2)
self.assertEqual(logits.shape , UpperCAmelCase__ )
lowerCAmelCase = np.array(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , UpperCAmelCase__ , atol=1E-2 ) )
@slow
def __UpperCAmelCase ( self : Any ) -> List[str]:
lowerCAmelCase = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=UpperCAmelCase__ , return_tensors='np' )
# forward pass
lowerCAmelCase = model(**UpperCAmelCase__ )
lowerCAmelCase = outputs.logits
# verify the logits
lowerCAmelCase = (1, 1_0_0_0)
self.assertEqual(logits.shape , UpperCAmelCase__ )
lowerCAmelCase = np.array([-1.2_385, -1.0_987, -1.0_108] )
self.assertTrue(np.allclose(logits[0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
lowerCAmelCase = 2_8_1
self.assertEqual(logits.argmax(-1 ).item() , UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : int ) -> str:
lowerCAmelCase = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=UpperCAmelCase__ , return_tensors='np' )
# forward pass
lowerCAmelCase = model(**UpperCAmelCase__ )
lowerCAmelCase = outputs.logits
# verify the logits
lowerCAmelCase = (1, 2_1_8_4_1)
self.assertEqual(logits.shape , UpperCAmelCase__ )
lowerCAmelCase = np.array([1.6_881, -0.2_787, 0.5_901] )
self.assertTrue(np.allclose(logits[0, :3] , UpperCAmelCase__ , atol=1E-4 ) )
lowerCAmelCase = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item() , UpperCAmelCase__ )
| 4 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack ( list ):
    def __lt__( self , other ) -> bool:
        return self[-1] < other[-1]
    def __eq__( self , other ) -> bool:
        return self[-1] == other[-1]
def patience_sort( collection : list ) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element] )
        i = bisect_left(stacks , new_stack )
        if i != len(stacks ):
            stacks[i].append(element )
        else:
            stacks.append(new_stack )
    # use a heap-based merge to merge the stacks efficiently
    collection[:] = merge(*(reversed(stack ) for stack in stacks) )
    return collection
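# Illustrative trace for patience_sort([5, 1, 4, 2, 3]): the stacks evolve to
# [5, 1], [4, 2], [3]. Each stack is kept in decreasing order, so
# reversed(stack) yields an increasing run, and the heapq-based merge of the
# runs [1, 5], [2, 4], [3] produces [1, 2, 3, 4, 5].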
if __name__ == "__main__":
__snake_case =input("""Enter numbers separated by a comma:\n""").strip()
__snake_case =[int(item) for item in user_input.split(""",""")]
print(patience_sort(unsorted))
| 4 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
lowerCamelCase : List[str] = BertTokenizer
lowerCamelCase : Tuple = BertTokenizerFast
lowerCamelCase : Optional[Any] = True
lowerCamelCase : str = True
lowerCamelCase : Optional[int] = filter_non_english
def __UpperCAmelCase ( self : str ) -> Optional[int]:
super().setUp()
lowerCAmelCase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : Any ) -> Optional[Any]:
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
lowerCAmelCase = self.tokenizer_class(self.vocab_file )
lowerCAmelCase = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(UpperCAmelCase__ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
if not self.test_rust_tokenizer:
return
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = 'UNwant\u00E9d,running'
lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase__ )
lowerCAmelCase = rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = tokenizer.encode(UpperCAmelCase__ )
lowerCAmelCase = rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# With lower casing
lowerCAmelCase = self.get_tokenizer(do_lower_case=UpperCAmelCase__ )
lowerCAmelCase = self.get_rust_tokenizer(do_lower_case=UpperCAmelCase__ )
lowerCAmelCase = 'UNwant\u00E9d,running'
lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase__ )
lowerCAmelCase = rust_tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = tokenizer.encode(UpperCAmelCase__ )
lowerCAmelCase = rust_tokenizer.encode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
lowerCAmelCase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
lowerCAmelCase = BasicTokenizer(do_lower_case=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __UpperCAmelCase ( self : Tuple ) -> Any:
lowerCAmelCase = BasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def __UpperCAmelCase ( self : Dict ) -> Any:
lowerCAmelCase = BasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __UpperCAmelCase ( self : Dict ) -> int:
lowerCAmelCase = BasicTokenizer(do_lower_case=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __UpperCAmelCase ( self : int ) -> List[str]:
lowerCAmelCase = BasicTokenizer(do_lower_case=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
lowerCAmelCase = BasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
lowerCAmelCase = BasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __UpperCAmelCase ( self : Any ) -> str:
lowerCAmelCase = BasicTokenizer(do_lower_case=UpperCAmelCase__ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
lowerCAmelCase = BasicTokenizer()
lowerCAmelCase = 'a\n\'ll !!to?\'d of, can\'t.'
lowerCAmelCase = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
self.assertListEqual(tokenizer.tokenize(UpperCAmelCase__ ) , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Any ) -> Dict:
lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
lowerCAmelCase = {}
for i, token in enumerate(UpperCAmelCase__ ):
lowerCAmelCase = i
lowerCAmelCase = WordpieceTokenizer(vocab=UpperCAmelCase__ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def __UpperCAmelCase ( self : str ) -> str:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def __UpperCAmelCase ( self : Any ) -> str:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def __UpperCAmelCase ( self : int ) -> List[Any]:
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(UpperCAmelCase__ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(UpperCAmelCase__ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
lowerCAmelCase = self.tokenizer_class.from_pretrained('bert-base-uncased' )
lowerCAmelCase = tokenizer.encode('sequence builders' , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer.encode('multi-sequence build' , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
lowerCAmelCase = tokenizer_r.encode_plus(
UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , )
lowerCAmelCase = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase__ , 'do_lower_case' ) else False
lowerCAmelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'Allen'),
((2_1, 2_3), '##NL'),
((2_3, 2_4), '##P'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'allen'),
((2_1, 2_3), '##nl'),
((2_3, 2_4), '##p'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def __UpperCAmelCase ( self : Any ) -> int:
lowerCAmelCase = ['的', '人', '有']
lowerCAmelCase = ''.join(UpperCAmelCase__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase = True
lowerCAmelCase = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = False
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__ )
lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__ )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCAmelCase = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(UpperCAmelCase__ )
]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 4 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__snake_case ="""\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__snake_case ="""\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__snake_case ="""
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute( self , predictions , references , max_order=4 , smooth=False ):
        score = compute_bleu(
            reference_corpus=references , translation_corpus=predictions , max_order=max_order , smooth=smooth )
        (bleu , precisions , bp , ratio , translation_length , reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
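# How compute_bleu's headline score is assembled from the parts returned in
# the dict above. This is a minimal sketch of the standard BLEU formula
# (brevity penalty times the geometric mean of the n-gram precisions), not
# the tensorflow/nmt implementation the metric actually delegates to; the
# helper name below is ours.
import math
def _bleu_from_parts(precisions , translation_length , reference_length ):
    # any n-gram order with zero matches zeroes the geometric mean
    if min(precisions ) == 0:
        return 0.0
    geo_mean = math.exp(sum(math.log(p ) for p in precisions ) / len(precisions ) )
    # brevity penalty: 1 when the translation is longer than the reference,
    # exp(1 - r / c) otherwise
    if translation_length > reference_length:
        brevity_penalty = 1.0
    else:
        brevity_penalty = math.exp(1 - reference_length / translation_length )
    return brevity_penalty * geo_mean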
| 4 | 1 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester :
def __init__( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str]=2 , UpperCAmelCase__ : Optional[Any]=8 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Optional[Any]=9_9 , UpperCAmelCase__ : int=1_6 , UpperCAmelCase__ : Optional[int]=5 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : int=3_6 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : Dict=0.0 , UpperCAmelCase__ : str=0.0 , UpperCAmelCase__ : Optional[int]=5_1_2 , UpperCAmelCase__ : Union[str, Any]=1_6 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : Any=None , ) -> int:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
def __UpperCAmelCase ( self : Any ) -> Tuple:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self : Any ) -> List[str]:
return MraConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self : Dict ) -> Dict:
        config = self.get_config()
        config.vocab_size = 3_0_0
return config
def __UpperCAmelCase ( self : str ) -> List[str]:
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int ) -> int:
lowerCAmelCase = MraModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , ) -> List[str]:
lowerCAmelCase = True
lowerCAmelCase = MraModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , )
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] ) -> Optional[int]:
lowerCAmelCase = MraForMaskedLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] ) -> int:
lowerCAmelCase = MraForQuestionAnswering(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any ) -> List[str]:
lowerCAmelCase = self.num_labels
lowerCAmelCase = MraForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] ) -> Optional[int]:
lowerCAmelCase = self.num_labels
lowerCAmelCase = MraForTokenClassification(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] ) -> Optional[Any]:
lowerCAmelCase = self.num_choices
lowerCAmelCase = MraForMultipleChoice(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( ModelTesterMixin , unittest.TestCase ):
lowerCamelCase : Optional[Any] = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Dict = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : int = False
lowerCamelCase : List[Any] = ()
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
lowerCAmelCase = MraModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 )
def __UpperCAmelCase ( self : str ) -> Tuple:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : int ) -> str:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase = type
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : int ) -> Optional[int]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : Dict ) -> List[Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : int ) -> Optional[int]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : str ) -> str:
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = MraModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@unittest.skip(reason='MRA does not output attentions' )
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
return
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
lowerCAmelCase = MraModel.from_pretrained('uw-madison/mra-base-512-4' )
lowerCAmelCase = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
lowerCAmelCase = model(UpperCAmelCase__ )[0]
lowerCAmelCase = torch.Size((1, 2_5_6, 7_6_8) )
self.assertEqual(output.shape , UpperCAmelCase__ )
lowerCAmelCase = torch.tensor(
[[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
lowerCAmelCase = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' )
lowerCAmelCase = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
lowerCAmelCase = model(UpperCAmelCase__ )[0]
lowerCAmelCase = 5_0_2_6_5
lowerCAmelCase = torch.Size((1, 2_5_6, vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase__ )
lowerCAmelCase = torch.tensor(
[[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self : List[str] ) -> Dict:
lowerCAmelCase = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' )
lowerCAmelCase = torch.arange(4_0_9_6 ).unsqueeze(0 )
with torch.no_grad():
lowerCAmelCase = model(UpperCAmelCase__ )[0]
lowerCAmelCase = 5_0_2_6_5
lowerCAmelCase = torch.Size((1, 4_0_9_6, vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase__ )
lowerCAmelCase = torch.tensor(
[[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) )
| 4 |
'''simple docstring'''
from sklearn.metrics import f1_score as fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__snake_case ="""\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
__snake_case ="""\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
__snake_case ="""
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy( preds , labels ):
    return float((preds == labels).mean() )
def acc_and_fa( preds , labels , fa_avg="binary" ):
    acc = simple_accuracy(preds , labels )
    fa = float(fa_score(y_true=labels , y_pred=preds , average=fa_avg ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
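# Illustrative call (mirroring the 'cb' example in the docstring above): with
# numpy arrays preds = labels = np.array([0, 1]), acc_and_fa(preds, labels)
# returns {'accuracy': 1.0, 'f1': 1.0}. Note that simple_accuracy relies on
# the array method .mean(), so plain Python lists must be converted first.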
def evaluate_multirc( ids_preds , labels ):
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred['prediction']
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    fas , ems = [], []
    for question, preds_labels in question_map.items():
        question_preds , question_labels = zip(*preds_labels )
        fa = fa_score(y_true=question_labels , y_pred=question_preds , average='macro' )
        fas.append(fa )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    fa_m = float(sum(fas ) / len(fas ) )
    em = sum(ems ) / len(ems )
    fa_a = float(fa_score(y_true=labels , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
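# Illustrative call (matching the 'multirc' example in the docstring above):
# ids_preds = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0},
#              {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
# labels = [0, 1]
# evaluate_multirc(ids_preds, labels) -> {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}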
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
    def _info( self ):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
    def _get_feature_types( self ):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
    def _compute( self , predictions , references ):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "cb":
            return acc_and_fa(predictions , references , fa_avg='macro' )
        elif self.config_name == "record":
            dataset = [
                {
                    'qas': [
                        {'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
            return evaluate_record(dataset , predictions )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions , references )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
| 4 | 1 |
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class UpperCAmelCase_ ( nn.Module ):
def __init__( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int=0.0 , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : str = "geglu" , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : str = "layer_norm" , UpperCAmelCase__ : bool = False , ) -> List[Any]:
super().__init__()
lowerCAmelCase = only_cross_attention
lowerCAmelCase = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
lowerCAmelCase = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
F''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
lowerCAmelCase = AdaLayerNorm(UpperCAmelCase__ , UpperCAmelCase__ )
elif self.use_ada_layer_norm_zero:
lowerCAmelCase = AdaLayerNormZero(UpperCAmelCase__ , UpperCAmelCase__ )
else:
lowerCAmelCase = nn.LayerNorm(UpperCAmelCase__ , elementwise_affine=UpperCAmelCase__ )
lowerCAmelCase = Attention(
query_dim=UpperCAmelCase__ , heads=UpperCAmelCase__ , dim_head=UpperCAmelCase__ , dropout=UpperCAmelCase__ , bias=UpperCAmelCase__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=UpperCAmelCase__ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
lowerCAmelCase = (
AdaLayerNorm(UpperCAmelCase__ , UpperCAmelCase__ )
if self.use_ada_layer_norm
else nn.LayerNorm(UpperCAmelCase__ , elementwise_affine=UpperCAmelCase__ )
)
lowerCAmelCase = Attention(
query_dim=UpperCAmelCase__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=UpperCAmelCase__ , dim_head=UpperCAmelCase__ , dropout=UpperCAmelCase__ , bias=UpperCAmelCase__ , upcast_attention=UpperCAmelCase__ , ) # is self-attn if encoder_hidden_states is none
else:
lowerCAmelCase = None
lowerCAmelCase = None
# 3. Feed-forward
lowerCAmelCase = nn.LayerNorm(UpperCAmelCase__ , elementwise_affine=UpperCAmelCase__ )
lowerCAmelCase = FeedForward(UpperCAmelCase__ , dropout=UpperCAmelCase__ , activation_fn=UpperCAmelCase__ , final_dropout=UpperCAmelCase__ )
# let chunk size default to None
lowerCAmelCase = None
lowerCAmelCase = 0
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ) -> Optional[int]:
# Sets chunk feed-forward
lowerCAmelCase = chunk_size
lowerCAmelCase = dim
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : torch.FloatTensor , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[torch.LongTensor] = None , UpperCAmelCase__ : Dict[str, Any] = None , UpperCAmelCase__ : Optional[torch.LongTensor] = None , ) -> Any:
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
lowerCAmelCase = self.norma(UpperCAmelCase__ , UpperCAmelCase__ )
elif self.use_ada_layer_norm_zero:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.norma(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , hidden_dtype=hidden_states.dtype )
else:
lowerCAmelCase = self.norma(UpperCAmelCase__ )
lowerCAmelCase = cross_attention_kwargs if cross_attention_kwargs is not None else {}
lowerCAmelCase = self.attna(
UpperCAmelCase__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , )
if self.use_ada_layer_norm_zero:
lowerCAmelCase = gate_msa.unsqueeze(1 ) * attn_output
lowerCAmelCase = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
lowerCAmelCase = (
self.norma(UpperCAmelCase__ , UpperCAmelCase__ ) if self.use_ada_layer_norm else self.norma(UpperCAmelCase__ )
)
lowerCAmelCase = self.attna(
UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , )
lowerCAmelCase = attn_output + hidden_states
# 3. Feed-forward
lowerCAmelCase = self.norma(UpperCAmelCase__ )
if self.use_ada_layer_norm_zero:
lowerCAmelCase = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
lowerCAmelCase = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
lowerCAmelCase = torch.cat(
[self.ff(UpperCAmelCase__ ) for hid_slice in norm_hidden_states.chunk(UpperCAmelCase__ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
lowerCAmelCase = self.ff(UpperCAmelCase__ )
if self.use_ada_layer_norm_zero:
lowerCAmelCase = gate_mlp.unsqueeze(1 ) * ff_output
lowerCAmelCase = ff_output + hidden_states
return hidden_states
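# FeedForward below is the MLP sub-layer: it widens the hidden size by `mult`
# (4x by default), applies the chosen activation, then projects back down.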
class FeedForward( nn.Module ):
def __init__( self : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : int = 4 , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : str = "geglu" , UpperCAmelCase__ : bool = False , ) -> str:
super().__init__()
lowerCAmelCase = int(dim * mult )
lowerCAmelCase = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
lowerCAmelCase = GELU(UpperCAmelCase__ , UpperCAmelCase__ )
if activation_fn == "gelu-approximate":
lowerCAmelCase = GELU(UpperCAmelCase__ , UpperCAmelCase__ , approximate='tanh' )
elif activation_fn == "geglu":
lowerCAmelCase = GEGLU(UpperCAmelCase__ , UpperCAmelCase__ )
elif activation_fn == "geglu-approximate":
lowerCAmelCase = ApproximateGELU(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = nn.ModuleList([] )
# project in
self.net.append(UpperCAmelCase__ )
# project dropout
self.net.append(nn.Dropout(UpperCAmelCase__ ) )
# project out
self.net.append(nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(UpperCAmelCase__ ) )
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : Tuple ) -> str:
for module in self.net:
lowerCAmelCase = module(UpperCAmelCase__ )
return hidden_states
class GELU( nn.Module ):
def __init__( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : str = "none" ) -> List[Any]:
super().__init__()
lowerCAmelCase = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = approximate
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : Optional[int] ) -> Any:
if gate.device.type != "mps":
return F.gelu(UpperCAmelCase__ , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def __UpperCAmelCase ( self : Tuple , UpperCAmelCase__ : Tuple ) -> List[str]:
lowerCAmelCase = self.proj(UpperCAmelCase__ )
lowerCAmelCase = self.gelu(UpperCAmelCase__ )
return hidden_states
class GEGLU( nn.Module ):
def __init__( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> int:
super().__init__()
lowerCAmelCase = nn.Linear(UpperCAmelCase__ , dim_out * 2 )
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] ) -> Any:
if gate.device.type != "mps":
return F.gelu(UpperCAmelCase__ )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : List[Any] ) -> Optional[int]:
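        # Project to twice the output width, split into value and gate halves,
        # and gate the value with GELU (the GEGLU variant from Shazeer, 2020).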
lowerCAmelCase , lowerCAmelCase = self.proj(UpperCAmelCase__ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(UpperCAmelCase__ )
class ApproximateGELU( nn.Module ):
def __init__( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Optional[Any]:
super().__init__()
lowerCAmelCase = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ )
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : List[str] ) -> List[Any]:
lowerCAmelCase = self.proj(UpperCAmelCase__ )
return x * torch.sigmoid(1.702 * x )
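# AdaLayerNorm below conditions normalization on a timestep embedding: the embedding
# predicts a per-sample scale and shift applied to the normalized activations.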
class AdaLayerNorm( nn.Module ):
def __init__( self : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] ) -> Optional[int]:
super().__init__()
lowerCAmelCase = nn.Embedding(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = nn.SiLU()
lowerCAmelCase = nn.Linear(UpperCAmelCase__ , embedding_dim * 2 )
lowerCAmelCase = nn.LayerNorm(UpperCAmelCase__ , elementwise_affine=UpperCAmelCase__ )
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] ) -> Dict:
lowerCAmelCase = self.linear(self.silu(self.emb(UpperCAmelCase__ ) ) )
lowerCAmelCase , lowerCAmelCase = torch.chunk(UpperCAmelCase__ , 2 )
lowerCAmelCase = self.norm(UpperCAmelCase__ ) * (1 + scale) + shift
return x
class AdaLayerNormZero( nn.Module ):
def __init__( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int ) -> Dict:
super().__init__()
lowerCAmelCase = CombinedTimestepLabelEmbeddings(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = nn.SiLU()
lowerCAmelCase = nn.Linear(UpperCAmelCase__ , 6 * embedding_dim , bias=UpperCAmelCase__ )
lowerCAmelCase = nn.LayerNorm(UpperCAmelCase__ , elementwise_affine=UpperCAmelCase__ , eps=1E-6 )
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int]=None ) -> List[Any]:
lowerCAmelCase = self.linear(self.silu(self.emb(UpperCAmelCase__ , UpperCAmelCase__ , hidden_dtype=UpperCAmelCase__ ) ) )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = emb.chunk(6 , dim=1 )
lowerCAmelCase = self.norm(UpperCAmelCase__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
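# AdaGroupNorm below applies the same idea to group norm: an (optionally activated)
# embedding predicts the scale and shift used after F.group_norm.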
class AdaGroupNorm( nn.Module ):
def __init__( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : float = 1E-5 ) -> Optional[Any]:
super().__init__()
lowerCAmelCase = num_groups
lowerCAmelCase = eps
if act_fn is None:
lowerCAmelCase = None
else:
lowerCAmelCase = get_activation(UpperCAmelCase__ )
lowerCAmelCase = nn.Linear(UpperCAmelCase__ , out_dim * 2 )
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ) -> Tuple:
if self.act:
lowerCAmelCase = self.act(UpperCAmelCase__ )
lowerCAmelCase = self.linear(UpperCAmelCase__ )
lowerCAmelCase = emb[:, :, None, None]
lowerCAmelCase , lowerCAmelCase = emb.chunk(2 , dim=1 )
lowerCAmelCase = F.group_norm(UpperCAmelCase__ , self.num_groups , eps=self.eps )
lowerCAmelCase = x * (1 + scale) + shift
return x
| 4 |
'''simple docstring'''
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
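# The lambda %-formats the template with its own repr: '%%' collapses to '%' and
# '%r' re-inserts the quoted source, so the printed program reproduces itself when run.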
| 4 | 1 |
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP =[
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
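# Expand the dataset/config pairs above into named test cases for absl's parameterized.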
def list_datasets_on_hf_gcp_parameters( with_config=True ):
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__lowercase ) )
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Union[str, Any] = None
lowerCamelCase : Dict = None
    def __UpperCAmelCase ( self , dataset , config_name ):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir )
            builder_cls = import_main_class(dataset_module.module_path , dataset=True )
            builder_instance = builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            dataset_info_url = '/'.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False ).replace(os.sep , '/' ),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(dataset_info_url , cache_dir=tmp_dir )
            self.assertTrue(os.path.exists(dataset_info_path ) )
@pytest.mark.integration
def test_as_dataset_from_hf_gcs( tmp_path_factory ):
    tmp_dir = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
    dataset_module = dataset_module_factory('wikipedia' , cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance = builder_cls(
        cache_dir=tmp_dir , config_name='20220301.frr' , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs( tmp_path ):
    dataset_module = dataset_module_factory('wikipedia' , cache_dir=tmp_path )
    builder_cls = import_main_class(dataset_module.module_path , dataset=True )
    builder_instance = builder_cls(
        cache_dir=tmp_path , config_name='20220301.frr' , hash=dataset_module.hash , )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds['train'] , IterableDataset )
    assert next(iter(ds['train'] ) )
| 4 |
'''simple docstring'''
import os
SYMBOLS ={"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1_000}
def parse_roman_numerals( numerals : str ):
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        # Subtractive notation: a smaller symbol before a larger one is subtracted.
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals( num : int ):
    # Greedily emit symbols from largest to smallest, handling the
    # subtractive forms (CM, CD, XC, XL, IX, IV) explicitly.
    numerals = ''
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def solution( roman_numerals_filename : str = "/p089_roman.txt" ):
    # Project Euler 89: count the characters saved by rewriting each numeral
    # in its minimal form.
    savings = 0
    with open(os.path.dirname(__file__ ) + roman_numerals_filename ) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original )
        shortened = generate_roman_numerals(num )
        savings += len(original ) - len(shortened )
    return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 4 | 1 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__snake_case ="""\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__snake_case ="""\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__snake_case ="""
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
def __UpperCAmelCase ( self : Tuple ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def __UpperCAmelCase ( self , predictions , references , max_order=4 , smooth=False ):
        score = compute_bleu(
            reference_corpus=references , translation_corpus=predictions , max_order=max_order , smooth=smooth )
        (bleu , precisions , bp , ratio , translation_length , reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 4 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng =random.Random()
if is_torch_available():
import torch
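# floats_list builds a shape[0] x shape[1] nested list of random floats, used
# throughout these tests as a stand-in for raw audio waveforms.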
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class ASTFeatureExtractionTester( unittest.TestCase ):
def __init__( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str]=7 , UpperCAmelCase__ : int=4_0_0 , UpperCAmelCase__ : int=2_0_0_0 , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple=1_6_0_0_0 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Union[str, Any]=True , ) -> Any:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = min_seq_length
lowerCAmelCase = max_seq_length
lowerCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase = feature_size
lowerCAmelCase = padding_value
lowerCAmelCase = sampling_rate
lowerCAmelCase = return_attention_mask
lowerCAmelCase = do_normalize
    def prepare_feat_extract_dict( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Union[str, Any]=False ) -> Optional[Any]:
def _flatten(UpperCAmelCase__ : int ):
return list(itertools.chain(*UpperCAmelCase__ ) )
if equal_length:
lowerCAmelCase = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCAmelCase = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase = [np.asarray(UpperCAmelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
lowerCamelCase : Dict = ASTFeatureExtractor
def __UpperCAmelCase ( self : str ) -> Optional[int]:
        self.feat_extract_tester = ASTFeatureExtractionTester(self )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase = [np.asarray(UpperCAmelCase__ ) for speech_input in speech_inputs]
# Test not batched input
lowerCAmelCase = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test batched
lowerCAmelCase = feat_extract(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
lowerCAmelCase = np.asarray(UpperCAmelCase__ )
lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
import torch
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = np.random.rand(1_0_0 ).astype(np.floataa )
lowerCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
    def _load_datasamples( self , num_samples ):
from datasets import load_dataset
lowerCAmelCase = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
lowerCAmelCase = ds.sort('id' ).select(range(UpperCAmelCase__ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
@require_torch
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
# fmt: off
lowerCAmelCase = torch.tensor(
[-0.9_894, -1.2_776, -0.9_066, -1.2_776, -0.9_349, -1.2_609, -1.0_386, -1.2_776,
-1.1_561, -1.2_776, -1.2_052, -1.2_723, -1.2_190, -1.2_132, -1.2_776, -1.1_133,
-1.1_953, -1.1_343, -1.1_584, -1.2_203, -1.1_770, -1.2_474, -1.2_381, -1.1_936,
-0.9_270, -0.8_317, -0.8_049, -0.7_706, -0.7_565, -0.7_869] )
# fmt: on
lowerCAmelCase = self._load_datasamples(1 )
lowerCAmelCase = ASTFeatureExtractor()
lowerCAmelCase = feature_extractor(UpperCAmelCase__ , return_tensors='pt' ).input_values
        self.assertEqual(input_values.shape , (1, 1_0_2_4, 1_2_8) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , UpperCAmelCase__ , atol=1E-4 ) )
| 4 | 1 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
__snake_case ={
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
__snake_case ={
"""facebook/blenderbot_small-90M""": 512,
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Tuple = VOCAB_FILES_NAMES
lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = BlenderbotSmallTokenizer
def __init__( self : Any , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : int="<|endoftext|>" , UpperCAmelCase__ : Dict="<|endoftext|>" , UpperCAmelCase__ : str="<|endoftext|>" , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Tuple=True , **UpperCAmelCase__ : Optional[Any] , ) -> Any:
super().__init__(
ByteLevelBPETokenizer(
vocab=UpperCAmelCase__ , merges=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ , ) , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , **UpperCAmelCase__ , )
lowerCAmelCase = add_prefix_space
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict=None ) -> Any:
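        # Single sequence: <bos> A <eos>; a pair becomes <bos> A <eos> <eos> B <eos>.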
lowerCAmelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
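        # Blenderbot does not use token type ids, so a zero vector spanning the
        # full special-token-padded length is returned in both cases.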
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 4 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : str ) -> List[Any]:
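        # Round-trip the prepared optimizer through pickle: prepare() wraps it in
        # AcceleratedOptimizer, which must stay picklable.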
        model = torch.nn.Linear(1_0 , 1_0 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 4 | 1 |