code
stringlengths 87
55.2k
| code_codestyle
int64 0
349
| style_context
stringlengths 135
49.1k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
---|---|---|---|---|
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class A__(__snake_case ):
"""simple docstring"""
_A : List[Any] = '''Salesforce/blip-image-captioning-base'''
_A : Any = (
'''This is a tool that generates a description of an image. It takes an input named `image` which should be the '''
'''image to caption, and returns a text that contains the description in English.'''
)
_A : Dict = '''image_captioner'''
_A : Tuple = AutoModelForVisionaSeq
_A : Tuple = ['''image''']
_A : Optional[int] = ['''text''']
def __init__( self , *_lowercase , **_lowercase ) -> List[str]:
requires_backends(self , ["""vision"""] )
super().__init__(*_lowercase , **_lowercase )
def UpperCamelCase__ ( self , _lowercase ) -> Optional[Any]:
return self.pre_processor(images=_lowercase , return_tensors="""pt""" )
def UpperCamelCase__ ( self , _lowercase ) -> Optional[int]:
return self.model.generate(**_lowercase )
def UpperCamelCase__ ( self , _lowercase ) -> str:
return self.pre_processor.batch_decode(_lowercase , skip_special_tokens=_lowercase )[0].strip()
| 248 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class A ( __snake_case ):
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 3 | 0 |
import requests
from bsa import BeautifulSoup
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
lowercase : str = BeautifulSoup(requests.get(snake_case__ , params=snake_case__ ).content , """html.parser""" )
lowercase : Dict = soup.find("""div""" , attrs={"""class""": """gs_ri"""} )
lowercase : Optional[int] = div.find("""div""" , attrs={"""class""": """gs_fl"""} ).find_all("""a""" )
return anchors[2].get_text()
if __name__ == "__main__":
lowercase : str = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2018,
'hl': 'en',
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
| 20 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
lowercase : Optional[int] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
lowercase : Optional[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
lowercase : str = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]:
"""simple docstring"""
if return_pvalue:
A : Union[str, Any] = pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )}
| 3 | 0 |
from ...processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE_ ( __snake_case ):
__magic_name__: List[Any] = "WhisperFeatureExtractor"
__magic_name__: Optional[int] = "WhisperTokenizer"
def __init__( self : int , _A : Tuple , _A : Optional[int] ) -> Optional[int]:
"""simple docstring"""
super().__init__(_A , _A )
snake_case_ : List[Any] = self.feature_extractor
snake_case_ : Union[str, Any] = False
def UpperCAmelCase_ ( self : List[str] , _A : Union[str, Any]=None , _A : List[Any]=None , _A : Any=True ) -> Any:
"""simple docstring"""
return self.tokenizer.get_decoder_prompt_ids(task=_A , language=_A , no_timestamps=_A )
def __call__( self : Optional[Any] , *_A : Any , **_A : Tuple ) -> Optional[Any]:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*_A , **_A )
snake_case_ : Optional[Any] = kwargs.pop('audio' , _A )
snake_case_ : Any = kwargs.pop('sampling_rate' , _A )
snake_case_ : Optional[int] = kwargs.pop('text' , _A )
if len(_A ) > 0:
snake_case_ : Optional[int] = args[0]
snake_case_ : Optional[int] = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
snake_case_ : int = self.feature_extractor(_A , *_A , sampling_rate=_A , **_A )
if text is not None:
snake_case_ : Tuple = self.tokenizer(_A , **_A )
if text is None:
return inputs
elif audio is None:
return encodings
else:
snake_case_ : List[str] = encodings['''input_ids''']
return inputs
def UpperCAmelCase_ ( self : Any , *_A : Tuple , **_A : List[str] ) -> Optional[int]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_A , **_A )
def UpperCAmelCase_ ( self : Any , *_A : List[Any] , **_A : Dict ) -> str:
"""simple docstring"""
return self.tokenizer.decode(*_A , **_A )
def UpperCAmelCase_ ( self : Optional[int] , _A : int , _A : Dict="np" ) -> Dict:
"""simple docstring"""
return self.tokenizer.get_prompt_ids(_A , return_tensors=_A )
| 327 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowercase : Dict = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[Any] = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[str] = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Dict = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Any = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowercase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 3 | 0 |
"""simple docstring"""
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
raise ValueError("""multiplicative_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""multiplicative_persistence() does not accept negative values""" )
_a : Any = 0
_a : List[Any] = str(snake_case__ )
while len(snake_case__ ) != 1:
_a : str = [int(snake_case__ ) for i in num_string]
_a : List[str] = 1
for i in range(0 , len(snake_case__ ) ):
total *= numbers[i]
_a : Optional[Any] = str(snake_case__ )
steps += 1
return steps
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
raise ValueError("""additive_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""additive_persistence() does not accept negative values""" )
_a : Tuple = 0
_a : Dict = str(snake_case__ )
while len(snake_case__ ) != 1:
_a : Any = [int(snake_case__ ) for i in num_string]
_a : Optional[int] = 0
for i in range(0 , len(snake_case__ ) ):
total += numbers[i]
_a : Tuple = str(snake_case__ )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 294 |
'''simple docstring'''
import os
import sys
import unittest
lowercase : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowercase : Any = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
lowercase : Optional[int] = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : Tuple = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE )
A : Any = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE )
A : List[Any] = {'''BertModelTest''': '''BertModelTester'''}
A : int = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE )
A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE )
A : List[str] = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
A : Union[str, Any] = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : int = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE )
A : Union[str, Any] = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE )
A : Dict = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
A : str = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
| 3 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ) -> Optional[int]:
_lowerCAmelCase : str = []
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
f"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
f"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
f"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
f"stage{idx}.patch_embed.norm.bias",
) )
return embed
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Optional[Any] ) -> Dict:
_lowerCAmelCase : Optional[Any] = []
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
f"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
f"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", f"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", f"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", f"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", f"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", f"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", f"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", f"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", f"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ) -> int:
_lowerCAmelCase : Tuple = []
token.append((f"cvt.encoder.stages.{idx}.cls_token", """stage2.cls_token""") )
return token
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
_lowerCAmelCase : Optional[int] = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : List[Any] ) -> Any:
_lowerCAmelCase : List[Any] = '''imagenet-1k-id2label.json'''
_lowerCAmelCase : str = 1000
_lowerCAmelCase : Tuple = '''huggingface/label-files'''
_lowerCAmelCase : List[Any] = num_labels
_lowerCAmelCase : List[Any] = json.load(open(cached_download(hf_hub_url(snake_case__ ,snake_case__ ,repo_type="""dataset""" ) ) ,"""r""" ) )
_lowerCAmelCase : List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()}
_lowerCAmelCase : int = idalabel
_lowerCAmelCase : Any = {v: k for k, v in idalabel.items()}
_lowerCAmelCase : List[str] = CvtConfig(num_labels=snake_case__ ,idalabel=snake_case__ ,labelaid=snake_case__ )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("""/""" ,1 )[-1][4:6] == "13":
_lowerCAmelCase : Any = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("""/""" ,1 )[-1][4:6] == "21":
_lowerCAmelCase : Optional[Any] = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
_lowerCAmelCase : Optional[Any] = [2, 2, 20]
_lowerCAmelCase : int = [3, 12, 16]
_lowerCAmelCase : str = [192, 768, 1024]
_lowerCAmelCase : Union[str, Any] = CvtForImageClassification(snake_case__ )
_lowerCAmelCase : Optional[int] = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
_lowerCAmelCase : List[str] = image_size
_lowerCAmelCase : Dict = torch.load(snake_case__ ,map_location=torch.device("""cpu""" ) )
_lowerCAmelCase : int = OrderedDict()
_lowerCAmelCase : Optional[Any] = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
_lowerCAmelCase : Optional[int] = list_of_state_dict + cls_token(snake_case__ )
_lowerCAmelCase : List[Any] = list_of_state_dict + embeddings(snake_case__ )
for cnt in range(config.depth[idx] ):
_lowerCAmelCase : Dict = list_of_state_dict + attention(snake_case__ ,snake_case__ )
_lowerCAmelCase : Optional[int] = list_of_state_dict + final()
for gg in list_of_state_dict:
print(snake_case__ )
for i in range(len(snake_case__ ) ):
_lowerCAmelCase : List[str] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(snake_case__ )
model.save_pretrained(snake_case__ )
image_processor.save_pretrained(snake_case__ )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Input Image Size',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_a : Union[str, Any] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 44 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class A ( __snake_case ):
__magic_name__ = DistilBertTokenizer
__magic_name__ = DistilBertTokenizerFast
__magic_name__ = True
@slow
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : List[Any] = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
A : Dict = tokenizer.encode('''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE )
A : List[str] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE )
A : Optional[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE )
A : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 3 | 0 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class lowercase__ ( unittest.TestCase):
def __A ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = tf.convert_to_tensor(
[
[
8.222_0991, # 3rd highest value; idx. 0
-0.562_0044,
5.2322_9752,
4.038_6393,
-6.879_8378,
-0.5478_5802,
-3.201_2153,
2.9277_7176,
1.8817_1953,
7.3534_1276, # 5th highest value; idx. 9
8.4320_7833, # 2nd highest value; idx. 10
-9.8571_1836,
-5.9620_9236,
-1.1303_9161,
-7.111_5294,
-0.836_9633,
-5.318_6408,
7.0642_7407,
0.8136_9344,
-0.8202_3817,
-5.917_9796,
0.5881_3443,
-6.9977_8438,
4.7155_1189,
-0.1877_1637,
7.4402_0759, # 4th highest value; idx. 25
9.3845_0987, # 1st highest value; idx. 26
2.1266_2941,
-9.3256_2038,
2.3565_2522,
], # cummulative prob of 5 highest values <= 0.6
[
0.5842_5518,
4.5313_9238,
-5.5751_0464,
-6.2803_0699,
-7.1952_9503,
-4.0212_2551,
1.3933_7037,
-6.0670_7057,
1.5948_0517,
-9.64_3119,
0.0390_7799,
0.6723_1762,
-8.8820_6726,
6.2711_5922, # 4th highest value; idx. 13
2.2852_0723,
4.8276_7506,
4.3042_1368,
8.827_5313, # 2nd highest value; idx. 17
5.4402_9958, # 5th highest value; idx. 18
-4.473_5794,
7.3857_9536, # 3rd highest value; idx. 20
-2.9105_1663,
2.6194_6077,
-2.567_4762,
-9.4895_9302,
-4.0292_2645,
-1.3541_6918,
9.6770_2323, # 1st highest value; idx. 27
-5.8947_8553,
1.8537_0467,
], # cummulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
SCREAMING_SNAKE_CASE : str = tf.convert_to_tensor(
[8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] , dtype=tf.floataa , ) # expected non filtered values as noted above
SCREAMING_SNAKE_CASE : str = tf_top_k_top_p_filtering(UpperCamelCase__ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
SCREAMING_SNAKE_CASE : Optional[int] = output[output != -float('''inf''' )]
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.cast(
tf.where(tf.not_equal(UpperCamelCase__ , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(UpperCamelCase__ , UpperCamelCase__ , rtol=1E-12 )
tf.debugging.assert_equal(UpperCamelCase__ , UpperCamelCase__ )
@require_tf
class lowercase__ ( unittest.TestCase , __snake_case):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
UpperCamelCase_ = {
"""AutoModelForCausalLM""": TFAutoModelForCausalLM,
"""AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq,
"""AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM,
"""AutoModelForVision2Seq""": TFAutoModelForVisionaSeq,
"""LogitsProcessorList""": TFLogitsProcessorList,
"""MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor,
"""create_tensor_fn""": tf.convert_to_tensor,
"""floats_tensor""": floats_tensor,
"""return_tensors""": """tf""",
}
@slow
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
SCREAMING_SNAKE_CASE : str = 2
SCREAMING_SNAKE_CASE : List[str] = 2
class lowercase__ ( tf.Module):
def __init__( self : List[Any] , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
super(UpperCamelCase__ , self ).__init__()
SCREAMING_SNAKE_CASE : str = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=UpperCamelCase__ , )
def __A ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.model.generate(
input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__ , max_new_tokens=UpperCamelCase__ , return_dict_in_generate=UpperCamelCase__ , )
return {"sequences": outputs["sequences"]}
SCREAMING_SNAKE_CASE : Tuple = [[2, 0], [102, 103]]
SCREAMING_SNAKE_CASE : List[Any] = [[1, 0], [1, 1]]
SCREAMING_SNAKE_CASE : Optional[int] = DummyModel(model=UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(UpperCamelCase__ , UpperCamelCase__ , signatures={'''serving_default''': dummy_model.serving} )
SCREAMING_SNAKE_CASE : Tuple = tf.saved_model.load(UpperCamelCase__ ).signatures['''serving_default''']
for batch_size in range(1 , len(UpperCamelCase__ ) + 1 ):
SCREAMING_SNAKE_CASE : List[Any] = {
'''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
'''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
}
SCREAMING_SNAKE_CASE : List[Any] = serving_func(**UpperCamelCase__ )['''sequences''']
SCREAMING_SNAKE_CASE : Optional[Any] = test_model.generate(**UpperCamelCase__ , max_new_tokens=UpperCamelCase__ )
tf.debugging.assert_equal(UpperCamelCase__ , UpperCamelCase__ )
@slow
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
SCREAMING_SNAKE_CASE : Optional[Any] = 1
SCREAMING_SNAKE_CASE : List[Any] = 2
class lowercase__ ( tf.Module):
def __init__( self : Union[str, Any] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
super(UpperCamelCase__ , self ).__init__()
SCREAMING_SNAKE_CASE : Optional[Any] = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=UpperCamelCase__ , )
def __A ( self : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model.generate(
input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__ , max_new_tokens=UpperCamelCase__ , return_dict_in_generate=UpperCamelCase__ , )
return {"sequences": outputs["sequences"]}
SCREAMING_SNAKE_CASE : Optional[int] = [[2], [102, 103]]
SCREAMING_SNAKE_CASE : List[str] = [[1], [1, 1]]
SCREAMING_SNAKE_CASE : Optional[int] = DummyModel(model=UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(UpperCamelCase__ , UpperCamelCase__ , signatures={'''serving_default''': dummy_model.serving} )
SCREAMING_SNAKE_CASE : Tuple = tf.saved_model.load(UpperCamelCase__ ).signatures['''serving_default''']
for input_row in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE : List[str] = {
'''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
'''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
}
SCREAMING_SNAKE_CASE : Tuple = serving_func(**UpperCamelCase__ )['''sequences''']
SCREAMING_SNAKE_CASE : List[str] = test_model.generate(**UpperCamelCase__ , max_new_tokens=UpperCamelCase__ )
tf.debugging.assert_equal(UpperCamelCase__ , UpperCamelCase__ )
@slow
@require_tensorflow_text
def __A ( self : Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=UpperCamelCase__ )
class lowercase__ ( tf.keras.layers.Layer):
def __init__( self : List[Any] ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : List[str] = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(UpperCamelCase__ , '''spiece.model''' ) , '''rb''' ).read() )
SCREAMING_SNAKE_CASE : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
def __A ( self : Optional[Any] , UpperCamelCase__ : str , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.tokenizer.tokenize(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = text.pad_model_inputs(
UpperCamelCase__ , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
SCREAMING_SNAKE_CASE : List[Any] = self.model.generate(input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__ )
return self.tokenizer.detokenize(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = CompleteSentenceTransformer()
SCREAMING_SNAKE_CASE : int = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
SCREAMING_SNAKE_CASE : Optional[int] = complete_model(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = tf.keras.Model(UpperCamelCase__ , UpperCamelCase__ )
keras_model.save(UpperCamelCase__ )
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = {
'''do_sample''': True,
'''num_beams''': 1,
'''top_p''': 0.7,
'''top_k''': 10,
'''temperature''': 0.7,
}
SCREAMING_SNAKE_CASE : List[Any] = 14
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
SCREAMING_SNAKE_CASE : Optional[Any] = '''Hello, my dog is cute and'''
SCREAMING_SNAKE_CASE : List[str] = tokenizer(UpperCamelCase__ , return_tensors='''tf''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
SCREAMING_SNAKE_CASE : Optional[Any] = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
SCREAMING_SNAKE_CASE : int = model.generate(**UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
SCREAMING_SNAKE_CASE : str = [638, 198]
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
SCREAMING_SNAKE_CASE : Any = model.generate(**UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
SCREAMING_SNAKE_CASE : Tuple = '''Hugging Face is a technology company based in New York and Paris.'''
SCREAMING_SNAKE_CASE : Any = bart_tokenizer(UpperCamelCase__ , return_tensors='''tf''' ).input_ids
SCREAMING_SNAKE_CASE : Tuple = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = bart_model.generate(UpperCamelCase__ ).numpy()
class lowercase__ ( __snake_case):
def __A ( self : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int=None , **UpperCamelCase__ : Dict ):
'''simple docstring'''
return super().call(UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
SCREAMING_SNAKE_CASE : int = bart_model.generate(UpperCamelCase__ , foo='''bar''' ).numpy()
self.assertTrue(np.array_equal(UpperCamelCase__ , UpperCamelCase__ ) )
class lowercase__ ( bart_model.model.encoder.__class__):
def __A ( self : Optional[int] , UpperCamelCase__ : Dict , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
return super().call(UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = FakeEncoder(bart_model.config , bart_model.model.shared )
SCREAMING_SNAKE_CASE : Tuple = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
SCREAMING_SNAKE_CASE : Any = bart_model.generate(UpperCamelCase__ ).numpy()
with self.assertRaises(UpperCamelCase__ ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(UpperCamelCase__ , foo='''bar''' )
| 182 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowercase : Optional[int] = logging.get_logger(__name__)
class A ( __snake_case ):
__magic_name__ = ['''input_features''', '''attention_mask''']
def __init__( self , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=16000 , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> int:
"""simple docstring"""
super().__init__(feature_size=SCREAMING_SNAKE_CASE , sampling_rate=SCREAMING_SNAKE_CASE , padding_value=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
A : Optional[int] = num_mel_bins
A : Tuple = do_ceptral_normalize
A : Dict = normalize_means
A : List[Any] = normalize_vars
A : List[str] = True
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
A : List[Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
A : Any = torch.from_numpy(SCREAMING_SNAKE_CASE ).unsqueeze(0 )
A : Any = ta_kaldi.fbank(SCREAMING_SNAKE_CASE , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 0.0 , ) -> np.ndarray:
"""simple docstring"""
if normalize_means:
A : Dict = x[:input_length].mean(axis=0 )
A : Optional[Any] = np.subtract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if normalize_vars:
A : str = x[:input_length].std(axis=0 )
A : int = np.divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if input_length < x.shape[0]:
A : List[str] = padding_value
# make sure array is in float32
A : Tuple = x.astype(np.floataa )
return x
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[np.ndarray]:
"""simple docstring"""
A : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
]
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
A : List[Any] = isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
A : Tuple = is_batched_numpy or (
isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ):
A : Union[str, Any] = np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A : Any = [raw_speech]
# extract fbank features
A : List[str] = [self._extract_fbank_features(SCREAMING_SNAKE_CASE ) for waveform in raw_speech]
# convert into correct format for padding
A : str = BatchFeature({'''input_features''': features} )
A : Union[str, Any] = self.pad(
SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
# make sure list is in array format
A : List[str] = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , SCREAMING_SNAKE_CASE ):
A : str = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features]
A : Union[str, Any] = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
A : Dict = (
np.array(SCREAMING_SNAKE_CASE , dtype=np.intaa )
if self._get_padding_strategies(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE ) is not PaddingStrategy.DO_NOT_PAD
else None
)
A : List[Any] = self.normalize(
padded_inputs['''input_features'''] , attention_mask=SCREAMING_SNAKE_CASE )
if return_tensors is not None:
A : int = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE )
return padded_inputs
| 3 | 0 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def snake_case_ ( __SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
return (data["data"], data["target"])
def snake_case_ ( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
lowercase_ : int = XGBClassifier()
classifier.fit(snake_case__ , snake_case__ )
return classifier
def snake_case_ ( ):
"""simple docstring"""
lowercase_ : Optional[Any] = load_iris()
lowercase_ : List[str] = data_handling(snake_case__ )
lowercase_ : Optional[Any] = train_test_split(
snake_case__ , snake_case__ , test_size=0.25 )
lowercase_ : Dict = iris['''target_names''']
# Create an XGBoost Classifier from the training data
lowercase_ : Tuple = xgboost(snake_case__ , snake_case__ )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
snake_case__ , snake_case__ , snake_case__ , display_labels=snake_case__ , cmap='''Blues''' , normalize='''true''' , )
plt.title('''Normalized Confusion Matrix - IRIS Dataset''' )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 93 |
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
lowercase : str = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
lowercase : str = get_tests_dir('fixtures/vocab.json')
lowercase : int = get_tests_dir('fixtures')
class A ( unittest.TestCase ):
__magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : Tuple = 0
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : List[Any] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A : Union[str, Any] = WavaVecaConfig()
A : List[str] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) )
A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A : Dict = WavaVecaFeatureExtractor()
A : List[str] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# save in new folder
processor.save_pretrained(SCREAMING_SNAKE_CASE )
# drop `processor_class` in tokenizer
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f:
A : Dict = json.load(SCREAMING_SNAKE_CASE )
config_dict.pop('''processor_class''' )
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE ) )
A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A : List[Any] = WavaVecaFeatureExtractor()
A : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# save in new folder
processor.save_pretrained(SCREAMING_SNAKE_CASE )
# drop `processor_class` in feature extractor
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f:
A : str = json.load(SCREAMING_SNAKE_CASE )
config_dict.pop('''processor_class''' )
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE ) )
A : str = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A : str = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(SCREAMING_SNAKE_CASE )
# copy relevant files
copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) )
# create emtpy sample processor
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f:
f.write('''{}''' )
A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
with self.assertRaises(SCREAMING_SNAKE_CASE ):
A : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(SCREAMING_SNAKE_CASE ):
A : Union[str, Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE )
A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
A : List[str] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
A : Tuple = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
A : List[str] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE )
A : int = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
try:
AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE )
AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE ):
AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
A : List[Any] = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
A : Tuple = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' )
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A : Optional[int] = CustomTokenizer(SCREAMING_SNAKE_CASE )
A : Any = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(SCREAMING_SNAKE_CASE )
A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
class A ( __snake_case ):
__magic_name__ = False
class A ( __snake_case ):
__magic_name__ = False
class A ( __snake_case ):
__magic_name__ = '''AutoFeatureExtractor'''
__magic_name__ = '''AutoTokenizer'''
__magic_name__ = False
try:
AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE )
AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local classes.
A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
A : Optional[int] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
A : Tuple = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : int = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Optional[int] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class A ( unittest.TestCase ):
__magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def __lowerCAmelCase ( cls ) -> Dict:
"""simple docstring"""
A : Optional[int] = TOKEN
HfFolder.save_token(SCREAMING_SNAKE_CASE )
@classmethod
def __lowerCAmelCase ( cls ) -> Any:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Union[str, Any] = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(SCREAMING_SNAKE_CASE , '''test-processor''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token )
A : int = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Tuple = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(SCREAMING_SNAKE_CASE , '''test-processor-org''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token , organization='''valid_org''' , )
A : int = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
A : Any = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
A : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' )
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A : str = CustomTokenizer(SCREAMING_SNAKE_CASE )
A : Union[str, Any] = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'{USER}/test-dynamic-processor' , token=self._token )
A : List[str] = Repository(SCREAMING_SNAKE_CASE , clone_from=F'{USER}/test-dynamic-processor' , token=self._token )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) ) as f:
A : Dict = json.load(SCREAMING_SNAKE_CASE )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_processing.py''' ) ) )
repo.push_to_hub()
A : Optional[int] = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' , trust_remote_code=SCREAMING_SNAKE_CASE )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
| 3 | 0 |
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase : str = get_tests_dir('''fixtures/test_sentencepiece.model''')
__lowerCamelCase : str = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
__lowerCamelCase : str = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class __snake_case ( __snake_case , unittest.TestCase ):
lowerCAmelCase_ = CamembertTokenizer
lowerCAmelCase_ = CamembertTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = True
def __a ( self : int ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ = CamembertTokenizer(_lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''<pad>'''
SCREAMING_SNAKE_CASE__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )
def __a ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(_lowercase ) , 10_04 )
def __a ( self : Any ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_05 )
def __a ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = CamembertTokenizer(_lowercase )
tokenizer.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = '''I was born in 92000, and this is falsé.'''
SCREAMING_SNAKE_CASE__ = tokenizer.encode(_lowercase )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(_lowercase )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def __a ( self : str ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ = '''I was born in 92000, and this is falsé.'''
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(_lowercase )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ = tokenizer.encode(_lowercase )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
@slow
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = {'''input_ids''': [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
SCREAMING_SNAKE_CASE__ = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=_lowercase , )
| 219 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
lowercase : Optional[Any] = None
lowercase : Tuple = logging.get_logger(__name__)
lowercase : Dict = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
lowercase : Tuple = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
'tokenizer_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
},
}
lowercase : List[str] = {
'google/rembert': 2_56,
}
lowercase : Dict = '▁'
class A ( __snake_case ):
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = RemBertTokenizer
def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[MASK]" , **SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
A : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
A : List[Any] = do_lower_case
A : str = remove_space
A : int = keep_accents
A : Union[str, Any] = vocab_file
A : List[Any] = False if not self.vocab_file else True
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]:
"""simple docstring"""
A : List[Any] = [self.sep_token_id]
A : Tuple = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]:
"""simple docstring"""
A : Tuple = [self.sep_token_id]
A : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE ) )
return
A : Any = os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 3 | 0 |
class _lowerCAmelCase :
def __init__( self , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None ) -> Any:
lowerCAmelCase_ = data
lowerCAmelCase_ = previous
lowerCAmelCase_ = next_node
def __str__( self ) -> str:
return f"""{self.data}"""
def __a ( self ) -> int:
return self.data
def __a ( self ) -> List[str]:
return self.next
def __a ( self ) -> Tuple:
return self.previous
class _lowerCAmelCase :
def __init__( self , _UpperCamelCase ) -> Optional[Any]:
lowerCAmelCase_ = head
def __iter__( self ) -> Any:
return self
def __a ( self ) -> int:
if not self.current:
raise StopIteration
else:
lowerCAmelCase_ = self.current.get_data()
lowerCAmelCase_ = self.current.get_next()
return value
class _lowerCAmelCase :
def __init__( self ) -> Optional[Any]:
lowerCAmelCase_ = None # First node in list
lowerCAmelCase_ = None # Last node in list
def __str__( self ) -> int:
lowerCAmelCase_ = self.head
lowerCAmelCase_ = []
while current is not None:
nodes.append(current.get_data() )
lowerCAmelCase_ = current.get_next()
return " ".join(str(_UpperCamelCase ) for node in nodes )
def __contains__( self , _UpperCamelCase ) -> str:
lowerCAmelCase_ = self.head
while current:
if current.get_data() == value:
return True
lowerCAmelCase_ = current.get_next()
return False
def __iter__( self ) -> int:
return LinkedListIterator(self.head )
def __a ( self ) -> Tuple:
if self.head:
return self.head.get_data()
return None
def __a ( self ) -> Union[str, Any]:
if self.tail:
return self.tail.get_data()
return None
def __a ( self , _UpperCamelCase ) -> None:
if self.head is None:
lowerCAmelCase_ = node
lowerCAmelCase_ = node
else:
self.insert_before_node(self.head , _UpperCamelCase )
def __a ( self , _UpperCamelCase ) -> None:
if self.head is None:
self.set_head(_UpperCamelCase )
else:
self.insert_after_node(self.tail , _UpperCamelCase )
def __a ( self , _UpperCamelCase ) -> None:
lowerCAmelCase_ = Node(_UpperCamelCase )
if self.head is None:
self.set_head(_UpperCamelCase )
else:
self.set_tail(_UpperCamelCase )
def __a ( self , _UpperCamelCase , _UpperCamelCase ) -> None:
lowerCAmelCase_ = node
lowerCAmelCase_ = node.previous
if node.get_previous() is None:
lowerCAmelCase_ = node_to_insert
else:
lowerCAmelCase_ = node_to_insert
lowerCAmelCase_ = node_to_insert
def __a ( self , _UpperCamelCase , _UpperCamelCase ) -> None:
lowerCAmelCase_ = node
lowerCAmelCase_ = node.next
if node.get_next() is None:
lowerCAmelCase_ = node_to_insert
else:
lowerCAmelCase_ = node_to_insert
lowerCAmelCase_ = node_to_insert
def __a ( self , _UpperCamelCase , _UpperCamelCase ) -> None:
lowerCAmelCase_ = 1
lowerCAmelCase_ = Node(_UpperCamelCase )
lowerCAmelCase_ = self.head
while node:
if current_position == position:
self.insert_before_node(_UpperCamelCase , _UpperCamelCase )
return
current_position += 1
lowerCAmelCase_ = node.next
self.insert_after_node(self.tail , _UpperCamelCase )
def __a ( self , _UpperCamelCase ) -> Node:
lowerCAmelCase_ = self.head
while node:
if node.get_data() == item:
return node
lowerCAmelCase_ = node.get_next()
raise Exception("Node not found" )
def __a ( self , _UpperCamelCase ) -> Dict:
if (node := self.get_node(_UpperCamelCase )) is not None:
if node == self.head:
lowerCAmelCase_ = self.head.get_next()
if node == self.tail:
lowerCAmelCase_ = self.tail.get_previous()
self.remove_node_pointers(_UpperCamelCase )
@staticmethod
def __a ( _UpperCamelCase ) -> None:
if node.get_next():
lowerCAmelCase_ = node.previous
if node.get_previous():
lowerCAmelCase_ = node.next
lowerCAmelCase_ = None
lowerCAmelCase_ = None
def __a ( self ) -> int:
return self.head is None
def lowerCamelCase__ ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 231 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase : Optional[Any] = logging.get_logger(__name__)
class A ( __snake_case ):
__magic_name__ = ['''pixel_values''']
def __init__( self , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 1 / 255 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
A : str = size if size is not None else {'''shortest_edge''': 384}
A : Tuple = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
A : str = do_resize
A : List[Any] = size
# Default value set here for backwards compatibility where the value in config is None
A : List[Any] = crop_pct if crop_pct is not None else 224 / 256
A : Optional[int] = resample
A : Union[str, Any] = do_rescale
A : List[str] = rescale_factor
A : Union[str, Any] = do_normalize
A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
A : str = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
A : Any = size['''shortest_edge''']
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
A : Dict = int(shortest_edge / crop_pct )
A : str = get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
A : int = resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE , ) -> PIL.Image.Image:
"""simple docstring"""
A : int = do_resize if do_resize is not None else self.do_resize
A : Tuple = crop_pct if crop_pct is not None else self.crop_pct
A : Optional[Any] = resample if resample is not None else self.resample
A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
A : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
A : List[str] = image_std if image_std is not None else self.image_std
A : Union[str, Any] = size if size is not None else self.size
A : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
A : Any = make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError('''crop_pct must be specified if size < 384.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
A : Optional[int] = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
A : Any = [self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , crop_pct=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
A : str = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
A : Dict = [self.normalize(image=SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE ) for image in images]
A : Any = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
A : Optional[int] = {'''pixel_values''': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
| 3 | 0 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Any = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class __lowerCAmelCase (__snake_case ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = """mvp"""
lowerCAmelCase__ : Optional[Any] = ["""past_key_values"""]
lowerCAmelCase__ : Optional[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__(self : Any , UpperCamelCase : Tuple=50267 , UpperCamelCase : int=1024 , UpperCamelCase : Dict=12 , UpperCamelCase : int=4096 , UpperCamelCase : List[str]=16 , UpperCamelCase : int=12 , UpperCamelCase : Dict=4096 , UpperCamelCase : Optional[int]=16 , UpperCamelCase : Dict=0.0 , UpperCamelCase : int=0.0 , UpperCamelCase : List[str]="gelu" , UpperCamelCase : Optional[int]=1024 , UpperCamelCase : List[str]=0.1 , UpperCamelCase : Dict=0.0 , UpperCamelCase : int=0.0 , UpperCamelCase : Optional[Any]=0.02 , UpperCamelCase : List[str]=0.0 , UpperCamelCase : Any=False , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : str=0 , UpperCamelCase : int=2 , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : Dict=2 , UpperCamelCase : int=2 , UpperCamelCase : List[Any]=False , UpperCamelCase : Tuple=100 , UpperCamelCase : Dict=800 , **UpperCamelCase : str , ):
'''simple docstring'''
lowercase__ = vocab_size
lowercase__ = max_position_embeddings
lowercase__ = d_model
lowercase__ = encoder_ffn_dim
lowercase__ = encoder_layers
lowercase__ = encoder_attention_heads
lowercase__ = decoder_ffn_dim
lowercase__ = decoder_layers
lowercase__ = decoder_attention_heads
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = encoder_layerdrop
lowercase__ = decoder_layerdrop
lowercase__ = classifier_dropout
lowercase__ = use_cache
lowercase__ = encoder_layers
lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase__ = use_prompt
lowercase__ = prompt_length
lowercase__ = prompt_mid_dim
super().__init__(
pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , is_encoder_decoder=UpperCamelCase , decoder_start_token_id=UpperCamelCase , forced_eos_token_id=UpperCamelCase , **UpperCamelCase , )
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , UpperCamelCase ):
lowercase__ = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
'''The config can simply be saved and uploaded again to be fixed.''' )
| 2 |
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(SCREAMING_SNAKE_CASE ):
A : int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : List[str] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@slow
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(SCREAMING_SNAKE_CASE ):
A : Any = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Any = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@slow
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
for model_name in ["bert-base-cased", "bert-large-uncased"]:
A : Optional[int] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
A : List[str] = FlaxBertModel.from_pretrained(SCREAMING_SNAKE_CASE )
A : Optional[Any] = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**SCREAMING_SNAKE_CASE ):
return model(**SCREAMING_SNAKE_CASE )
eval(**SCREAMING_SNAKE_CASE ).block_until_ready()
@slow
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
for model_name in ["roberta-base", "roberta-large"]:
A : List[str] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
A : Union[str, Any] = FlaxRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE )
A : int = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**SCREAMING_SNAKE_CASE ):
return model(**SCREAMING_SNAKE_CASE )
eval(**SCREAMING_SNAKE_CASE ).block_until_ready()
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE , '''bert-base is not a local folder and is not a valid model identifier''' ):
A : List[Any] = FlaxAutoModel.from_pretrained('''bert-base''' )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
A : Optional[int] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE , revision='''aaaaaa''' )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ):
A : List[str] = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE , '''Use `from_pt=True` to load this model''' ):
A : Any = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
| 3 | 0 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
lowercase__ : List[str] = get_tests_dir('''fixtures''')
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
lowerCAmelCase = mock.Mock()
lowerCAmelCase = 500
lowerCAmelCase = {}
lowerCAmelCase = HTTPError
lowerCAmelCase = {}
# Download this model to make sure it's in the cache.
lowerCAmelCase = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=__SCREAMING_SNAKE_CASE ) as mock_head:
lowerCAmelCase = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
lowerCAmelCase = ViTImageProcessor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCAmelCase = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
lowerCAmelCase = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@is_staging_test
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ) ->Union[str, Any]:
lowerCAmelCase = TOKEN
HfFolder.save_token(__SCREAMING_SNAKE_CASE )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ) ->Union[str, Any]:
try:
delete_repo(token=cls._token , repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
lowerCAmelCase = ViTImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token )
lowerCAmelCase = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__SCREAMING_SNAKE_CASE , getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__SCREAMING_SNAKE_CASE , repo_id='''test-image-processor''' , push_to_hub=__SCREAMING_SNAKE_CASE , use_auth_token=self._token )
lowerCAmelCase = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__SCREAMING_SNAKE_CASE , getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = ViTImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token )
lowerCAmelCase = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__SCREAMING_SNAKE_CASE , getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__SCREAMING_SNAKE_CASE , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=__SCREAMING_SNAKE_CASE , use_auth_token=self._token )
lowerCAmelCase = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__SCREAMING_SNAKE_CASE , getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
CustomImageProcessor.register_for_auto_class()
lowerCAmelCase = CustomImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , )
lowerCAmelCase = AutoImageProcessor.from_pretrained(
F"{USER}/test-dynamic-image-processor" , trust_remote_code=__SCREAMING_SNAKE_CASE )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
| 338 |
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowercase : Union[str, Any] = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
lowercase : Tuple = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def lowerCAmelCase_ ( snake_case__ , snake_case__=False ):
'''simple docstring'''
A, A : Tuple = create_model(
'''HTSAT-tiny''' , '''roberta''' , snake_case__ , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=snake_case__ , fusion_type='''aff_2d''' if enable_fusion else None , )
return model, model_cfg
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : Dict = {}
A : str = R'''.*sequential.(\d+).*'''
A : Union[str, Any] = R'''.*_projection.(\d+).*'''
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
A : Any = key.replace(snake_case__ , snake_case__ )
if re.match(snake_case__ , snake_case__ ):
# replace sequential layers with list
A : Any = re.match(snake_case__ , snake_case__ ).group(1 )
A : List[str] = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(snake_case__ )//3}.linear.' )
elif re.match(snake_case__ , snake_case__ ):
A : Union[str, Any] = int(re.match(snake_case__ , snake_case__ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
A : str = 1 if projecton_layer == 0 else 2
A : Optional[Any] = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' )
if "audio" and "qkv" in key:
# split qkv into query key and value
A : int = value
A : List[Any] = mixed_qkv.size(0 ) // 3
A : Union[str, Any] = mixed_qkv[:qkv_dim]
A : Optional[int] = mixed_qkv[qkv_dim : qkv_dim * 2]
A : Optional[int] = mixed_qkv[qkv_dim * 2 :]
A : Tuple = query_layer
A : Union[str, Any] = key_layer
A : Optional[int] = value_layer
else:
A : Dict = value
return model_state_dict
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ):
'''simple docstring'''
A, A : int = init_clap(snake_case__ , enable_fusion=snake_case__ )
clap_model.eval()
A : str = clap_model.state_dict()
A : Union[str, Any] = rename_state_dict(snake_case__ )
A : Tuple = ClapConfig()
A : str = enable_fusion
A : str = ClapModel(snake_case__ )
# ignore the spectrogram embedding layer
model.load_state_dict(snake_case__ , strict=snake_case__ )
model.save_pretrained(snake_case__ )
transformers_config.save_pretrained(snake_case__ )
if __name__ == "__main__":
lowercase : List[str] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
lowercase : Tuple = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 3 | 0 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__snake_case : str = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
__snake_case : str = get_tests_dir("""fixtures/vocab.json""")
__snake_case : int = get_tests_dir("""fixtures""")
class A__(unittest.TestCase ):
"""simple docstring"""
_A : int = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
def UpperCamelCase__ ( self ) -> List[str]:
a_ : Tuple = 0
def UpperCamelCase__ ( self ) -> Optional[int]:
a_ : List[Any] = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(_lowercase , _lowercase )
def UpperCamelCase__ ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
a_ : Union[str, Any] = WavaVecaConfig()
a_ : List[str] = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
a_ : List[str] = AutoProcessor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def UpperCamelCase__ ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(_lowercase , os.path.join(_lowercase , _lowercase ) )
copyfile(_lowercase , os.path.join(_lowercase , """vocab.json""" ) )
a_ : Optional[Any] = AutoProcessor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
a_ : Dict = WavaVecaFeatureExtractor()
a_ : List[str] = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
a_ : str = WavaVecaProcessor(_lowercase , _lowercase )
# save in new folder
processor.save_pretrained(_lowercase )
# drop `processor_class` in tokenizer
with open(os.path.join(_lowercase , _lowercase ) , """r""" ) as f:
a_ : Dict = json.load(_lowercase )
config_dict.pop("""processor_class""" )
with open(os.path.join(_lowercase , _lowercase ) , """w""" ) as f:
f.write(json.dumps(_lowercase ) )
a_ : Optional[Any] = AutoProcessor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def UpperCamelCase__ ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdirname:
a_ : List[Any] = WavaVecaFeatureExtractor()
a_ : List[Any] = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
a_ : str = WavaVecaProcessor(_lowercase , _lowercase )
# save in new folder
processor.save_pretrained(_lowercase )
# drop `processor_class` in feature extractor
with open(os.path.join(_lowercase , _lowercase ) , """r""" ) as f:
a_ : str = json.load(_lowercase )
config_dict.pop("""processor_class""" )
with open(os.path.join(_lowercase , _lowercase ) , """w""" ) as f:
f.write(json.dumps(_lowercase ) )
a_ : str = AutoProcessor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def UpperCamelCase__ ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
a_ : str = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(_lowercase )
# copy relevant files
copyfile(_lowercase , os.path.join(_lowercase , """vocab.json""" ) )
# create emtpy sample processor
with open(os.path.join(_lowercase , _lowercase ) , """w""" ) as f:
f.write("""{}""" )
a_ : List[str] = AutoProcessor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def UpperCamelCase__ ( self ) -> Any:
with self.assertRaises(_lowercase ):
a_ : Optional[Any] = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowercase ):
a_ : Union[str, Any] = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_lowercase )
a_ : Union[str, Any] = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_lowercase )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
a_ : List[str] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
a_ : Tuple = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
a_ : List[str] = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_lowercase , use_fast=_lowercase )
a_ : int = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def UpperCamelCase__ ( self ) -> Optional[Any]:
try:
AutoConfig.register("""custom""" , _lowercase )
AutoFeatureExtractor.register(_lowercase , _lowercase )
AutoTokenizer.register(_lowercase , slow_tokenizer_class=_lowercase )
AutoProcessor.register(_lowercase , _lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowercase ):
AutoProcessor.register(_lowercase , _lowercase )
# Now that the config is registered, it can be used as any other config with the auto-API
a_ : List[Any] = CustomFeatureExtractor.from_pretrained(_lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
a_ : Tuple = os.path.join(_lowercase , """vocab.txt""" )
with open(_lowercase , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
a_ : Optional[int] = CustomTokenizer(_lowercase )
a_ : Any = CustomProcessor(_lowercase , _lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(_lowercase )
a_ : List[str] = AutoProcessor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self ) -> Optional[int]:
class A__(__snake_case ):
"""simple docstring"""
_A : Optional[int] = False
class A__(__snake_case ):
"""simple docstring"""
_A : str = False
class A__(__snake_case ):
"""simple docstring"""
_A : str = '''AutoFeatureExtractor'''
_A : List[Any] = '''AutoTokenizer'''
_A : int = False
try:
AutoConfig.register("""custom""" , _lowercase )
AutoFeatureExtractor.register(_lowercase , _lowercase )
AutoTokenizer.register(_lowercase , slow_tokenizer_class=_lowercase )
AutoProcessor.register(_lowercase , _lowercase )
# If remote code is not set, the default is to use local classes.
a_ : Union[str, Any] = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
a_ : Optional[int] = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_lowercase )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
a_ : Tuple = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_lowercase )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self ) -> str:
a_ : int = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def UpperCamelCase__ ( self ) -> str:
a_ : Optional[int] = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class A__(unittest.TestCase ):
"""simple docstring"""
_A : Union[str, Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def UpperCamelCase__ ( cls ) -> Dict:
a_ : Optional[int] = TOKEN
HfFolder.save_token(_lowercase )
@classmethod
def UpperCamelCase__ ( cls ) -> Any:
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def UpperCamelCase__ ( self ) -> Dict:
a_ : Union[str, Any] = WavaVecaProcessor.from_pretrained(_lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_lowercase , """test-processor""" ) , push_to_hub=_lowercase , use_auth_token=self._token )
a_ : int = WavaVecaProcessor.from_pretrained(F'''{USER}/test-processor''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_lowercase , getattr(new_processor.feature_extractor , _lowercase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def UpperCamelCase__ ( self ) -> Optional[int]:
a_ : Tuple = WavaVecaProcessor.from_pretrained(_lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_lowercase , """test-processor-org""" ) , push_to_hub=_lowercase , use_auth_token=self._token , organization="""valid_org""" , )
a_ : int = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_lowercase , getattr(new_processor.feature_extractor , _lowercase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def UpperCamelCase__ ( self ) -> str:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
a_ : Any = CustomFeatureExtractor.from_pretrained(_lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
a_ : Union[str, Any] = os.path.join(_lowercase , """vocab.txt""" )
with open(_lowercase , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
a_ : str = CustomTokenizer(_lowercase )
a_ : Union[str, Any] = CustomProcessor(_lowercase , _lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'''{USER}/test-dynamic-processor''' , token=self._token )
a_ : List[str] = Repository(_lowercase , clone_from=F'''{USER}/test-dynamic-processor''' , token=self._token )
processor.save_pretrained(_lowercase )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(_lowercase , """tokenizer_config.json""" ) ) as f:
a_ : Dict = json.load(_lowercase )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(_lowercase , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(_lowercase , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(_lowercase , """custom_processing.py""" ) ) )
repo.push_to_hub()
a_ : Optional[int] = AutoProcessor.from_pretrained(F'''{USER}/test-dynamic-processor''' , trust_remote_code=_lowercase )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
| 248 |
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
lowercase : Dict = logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
A : Union[str, Any] = os.path.abspath(snake_case__ )
logger.info(F'Loading PyTorch weights from {pt_path}' )
A : Any = torch.load(snake_case__ , map_location='''cpu''' )
logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
A : List[str] = convert_pytorch_state_dict_to_flax(snake_case__ , snake_case__ )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
A : Any = convert_pytorch_sharded_state_dict_to_flax(snake_case__ , snake_case__ )
return flax_state_dict
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(snake_case__ ) -> bool:
return len(set(snake_case__ ) & {key, (model_prefix,) + key} ) > 0
# layer norm
A : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(snake_case__ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
A : Tuple = pt_tuple_key[:-1] + ('''mean''',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(snake_case__ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
A : Dict = pt_tuple_key[:-1] + ('''var''',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(snake_case__ ):
return renamed_pt_tuple_key, pt_tensor
# embedding
A : Any = pt_tuple_key[:-1] + ('''embedding''',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(snake_case__ ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
A : Optional[Any] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(snake_case__ ):
A : List[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
A : Optional[int] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(snake_case__ ):
A : str = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
A : Dict = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
A : List[Any] = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
A : Dict = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
A : List[Any] = pt_tuple_key[-2] + '''_g'''
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
A : List[str] = pt_tuple_key[-2] + '''_v'''
if name is not None:
A : int = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
A : Dict = {k: v.numpy() for k, v in pt_state_dict.items()}
A : int = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
A : List[str] = flax_model.params['''params''']
else:
A : Dict = flax_model.params
A : List[Any] = flatten_dict(snake_case__ )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
A : List[str] = flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(snake_case__ )
A : int = {}
A : List[str] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
A : int = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A : str = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
A : Union[str, Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
A : Any = pt_tuple_key[1:]
# Correctly rename weight parameters
A, A : Dict = rename_key_and_reshape_tensor(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# add model prefix if necessary
A : Any = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
A : int = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
A : Tuple = jnp.asarray(snake_case__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(snake_case__ , snake_case__ )
continue
# also add unexpected weight so that warning is thrown
A : List[str] = jnp.asarray(snake_case__ )
else:
# also add unexpected weight so that warning is thrown
A : Optional[Any] = jnp.asarray(snake_case__ )
return unflatten_dict(snake_case__ )
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
import torch
# Load the index
A : Union[str, Any] = {}
for shard_file in shard_filenames:
# load using msgpack utils
A : List[str] = torch.load(snake_case__ )
A : int = {k: v.numpy() for k, v in pt_state_dict.items()}
A : Tuple = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
A : Optional[int] = flax_model.params['''params''']
A : List[Any] = flatten_dict(snake_case__ )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
A : Dict = flax_model.params
A : Tuple = flatten_dict(snake_case__ )
A : List[str] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
A : List[str] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A : int = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
A : List[str] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
A : Union[str, Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
A, A : Any = rename_key_and_reshape_tensor(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# add model prefix if necessary
A : int = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
A : int = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
A : Optional[int] = jnp.asarray(snake_case__ )
continue
if "var" in flax_key[-1]:
A : Optional[int] = jnp.asarray(snake_case__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(snake_case__ , snake_case__ )
continue
# also add unexpected weight so that warning is thrown
A : Optional[Any] = jnp.asarray(snake_case__ )
else:
# also add unexpected weight so that warning is thrown
A : Optional[Any] = jnp.asarray(snake_case__ )
return unflatten_dict(snake_case__ )
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
A : Dict = os.path.abspath(snake_case__ )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
A : List[str] = getattr(snake_case__ , '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(snake_case__ , '''rb''' ) as state_f:
try:
A : int = from_bytes(snake_case__ , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ )
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
A : List[str] = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values()
if any(snake_case__ ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
A : Optional[Any] = jax.tree_util.tree_map(
lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ )
A : Union[str, Any] = flatten_dict(snake_case__ )
A : List[Any] = pt_model.state_dict()
A : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
A : Tuple = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
A : int = []
A : Any = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
A : Union[str, Any] = flax_key_tuple[0] == pt_model.base_model_prefix
A : int = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
A : List[str] = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
A : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case__ ) not in pt_model_dict:
# conv layer
A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',)
A : Optional[int] = jnp.transpose(snake_case__ , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ) not in pt_model_dict:
# linear layer
A : Tuple = flax_key_tuple[:-1] + ('''weight''',)
A : Tuple = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
A : Tuple = flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
A : Tuple = flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
A : List[Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
A : Union[str, Any] = '''.'''.join(snake_case__ )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
A : int = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
A : Optional[int] = key.split('''.''' )
A : Dict = None
if key_components[-3::2] == ["parametrizations", "original0"]:
A : List[str] = key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
A : List[Any] = key_components[-2] + '''_v'''
if name is not None:
A : str = key_components[:-3] + [name]
A : Optional[Any] = '''.'''.join(snake_case__ )
A : Optional[Any] = key
if flax_key in special_pt_names:
A : Optional[Any] = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
A : Dict = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor
A : Dict = torch.from_numpy(snake_case__ )
# remove from missing keys
missing_keys.remove(snake_case__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(snake_case__ )
pt_model.load_state_dict(snake_case__ )
# re-transform missing_keys to list
A : List[Any] = list(snake_case__ )
if len(snake_case__ ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(snake_case__ ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
''' use it for predictions and inference.''' )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
'''If your task is similar to the task the model of the checkpoint was trained on, '''
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
| 3 | 0 |
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> int:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Any:
lowercase : List[Any] = 0
lowercase : Optional[int] = len(snake_case__ ) # No of vertices in graph
lowercase : List[str] = [0] * n
lowercase : Any = [False] * n
def dfs(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[int] = True
lowercase : Tuple = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(snake_case__ , snake_case__ , snake_case__ , id_ )
lowercase : Union[str, Any] = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowercase : str = min(low[at] , low[to] )
lowercase : list[tuple[int, int]] = []
for i in range(snake_case__ ):
if not visited[i]:
dfs(snake_case__ , -1 , snake_case__ , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
lowercase : Optional[int] = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
lowercase : Optional[Any] = {
'169M': 7_68,
'430M': 10_24,
'1B5': 20_48,
'3B': 25_60,
'7B': 40_96,
'14B': 51_20,
}
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : Optional[Any] = list(state_dict.keys() )
for name in state_dict_keys:
A : str = state_dict.pop(snake_case__ )
# emb -> embedding
if name.startswith('''emb.''' ):
A : Optional[Any] = name.replace('''emb.''' , '''embeddings.''' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('''blocks.0.ln0''' ):
A : Union[str, Any] = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' )
# att -> attention
A : int = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , snake_case__ )
# ffn -> feed_forward
A : List[Any] = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , snake_case__ )
# time_mix_k -> time_mix_key and reshape
if name.endswith('''.time_mix_k''' ):
A : List[str] = name.replace('''.time_mix_k''' , '''.time_mix_key''' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('''.time_mix_v''' ):
A : Union[str, Any] = name.replace('''.time_mix_v''' , '''.time_mix_value''' )
# time_mix_r -> time_mix_key and reshape
if name.endswith('''.time_mix_r''' ):
A : Union[str, Any] = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' )
if name != "head.weight":
A : List[Any] = '''rwkv.''' + name
A : Dict = weight
return state_dict
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=False , snake_case__=None ):
'''simple docstring'''
if tokenizer_file is None:
print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' )
A : int = 5_0277
A : Optional[int] = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' )
else:
A : str = PreTrainedTokenizerFast(tokenizer_file=snake_case__ )
A : Any = len(snake_case__ )
tokenizer.save_pretrained(snake_case__ )
# 2. Build the config
A : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
A : List[str] = candidate
break
if size is None:
raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' )
if size not in possible_sizes:
raise ValueError(F'`size` should be one of {possible_sizes}, got {size}.' )
A : Any = RwkvConfig(
vocab_size=snake_case__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(snake_case__ )
# 3. Download model file then convert state_dict
A : Union[str, Any] = hf_hub_download(snake_case__ , snake_case__ )
A : Tuple = torch.load(snake_case__ , map_location='''cpu''' )
A : List[Any] = convert_state_dict(snake_case__ )
# 4. Split in shards and save
A, A : List[str] = shard_checkpoint(snake_case__ )
for shard_file, shard in shards.items():
torch.save(snake_case__ , os.path.join(snake_case__ , snake_case__ ) )
if index is not None:
A : Dict = os.path.join(snake_case__ , snake_case__ )
# Save the index as well
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
A : List[Any] = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '''\n'''
f.write(snake_case__ )
# 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
print(
'''Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.''' )
A : List[Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
A : Union[str, Any] = torch.load(os.path.join(snake_case__ , snake_case__ ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(snake_case__ , snake_case__ ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' )
A : int = AutoModelForCausalLM.from_pretrained(snake_case__ )
model.push_to_hub(snake_case__ , max_shard_size='''2GB''' )
tokenizer.push_to_hub(snake_case__ )
if __name__ == "__main__":
lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
lowercase : Union[str, Any] = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 3 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'post_extract_proj': 'feature_projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.upsample.0': 'encoder.upsample.projection',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a ):
for attribute in key.split('.' ):
snake_case_ : int = getattr(snake_case__ , snake_case__ )
if weight_type is not None:
snake_case_ : Optional[int] = getattr(snake_case__ , snake_case__ ).shape
else:
snake_case_ : Union[str, Any] = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
snake_case_ : Optional[int] = value
elif weight_type == "weight_g":
snake_case_ : str = value
elif weight_type == "weight_v":
snake_case_ : str = value
elif weight_type == "bias":
snake_case_ : List[str] = value
else:
snake_case_ : Any = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a ):
snake_case_ : List[Any] = []
snake_case_ : Optional[int] = fairseq_model.state_dict()
snake_case_ : Optional[Any] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
snake_case_ : Tuple = False
if "conv_layers" in name:
load_conv_layer(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == 'group' , )
snake_case_ : str = True
else:
for key, mapped_key in MAPPING.items():
snake_case_ : str = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
snake_case_ : Union[str, Any] = True
if "*" in mapped_key:
snake_case_ : List[Any] = name.split(snake_case__ )[0].split('.' )[-2]
snake_case_ : Optional[int] = mapped_key.replace('*' , snake_case__ )
if "weight_g" in name:
snake_case_ : List[str] = '''weight_g'''
elif "weight_v" in name:
snake_case_ : Union[str, Any] = '''weight_v'''
elif "weight" in name:
snake_case_ : Dict = '''weight'''
elif "bias" in name:
snake_case_ : Dict = '''bias'''
else:
snake_case_ : Optional[Any] = None
set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
continue
if not is_used:
unused_weights.append(snake_case__ )
logger.warning(f"""Unused weights: {unused_weights}""" )
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a ):
snake_case_ : Union[str, Any] = full_name.split('conv_layers.' )[-1]
snake_case_ : List[Any] = name.split('.' )
snake_case_ : Union[str, Any] = int(items[0] )
snake_case_ : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
snake_case_ : Optional[Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
snake_case_ : Optional[int] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
snake_case_ : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
snake_case_ : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(snake_case__ )
def SCREAMING_SNAKE_CASE__ ( __a , __a ):
snake_case_ : Dict = SEWConfig()
if is_finetuned:
snake_case_ : List[Any] = model.wav_encoder.wav_model.cfg
else:
snake_case_ : str = model.cfg
snake_case_ : int = fs_config.conv_bias
snake_case_ : Optional[int] = eval(fs_config.conv_feature_layers )
snake_case_ : Union[str, Any] = [x[0] for x in conv_layers]
snake_case_ : Dict = [x[1] for x in conv_layers]
snake_case_ : Dict = [x[2] for x in conv_layers]
snake_case_ : str = '''gelu'''
snake_case_ : Tuple = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group'''
snake_case_ : Optional[Any] = 0.0
snake_case_ : List[str] = fs_config.activation_fn.name
snake_case_ : List[str] = fs_config.encoder_embed_dim
snake_case_ : Optional[Any] = 0.02
snake_case_ : Any = fs_config.encoder_ffn_embed_dim
snake_case_ : str = 1E-5
snake_case_ : str = fs_config.encoder_layerdrop
snake_case_ : List[str] = fs_config.encoder_attention_heads
snake_case_ : List[str] = fs_config.conv_pos_groups
snake_case_ : str = fs_config.conv_pos
snake_case_ : Union[str, Any] = len(snake_case__ )
snake_case_ : str = fs_config.encoder_layers
snake_case_ : List[Any] = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
snake_case_ : Optional[int] = model.cfg
snake_case_ : Union[str, Any] = fs_config.final_dropout
snake_case_ : Optional[int] = fs_config.layerdrop
snake_case_ : str = fs_config.activation_dropout
snake_case_ : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
snake_case_ : Dict = fs_config.attention_dropout
snake_case_ : List[str] = fs_config.dropout_input
snake_case_ : Union[str, Any] = fs_config.dropout
snake_case_ : int = fs_config.mask_channel_length
snake_case_ : str = fs_config.mask_channel_prob
snake_case_ : List[str] = fs_config.mask_length
snake_case_ : Tuple = fs_config.mask_prob
snake_case_ : Optional[int] = '''Wav2Vec2FeatureExtractor'''
snake_case_ : Union[str, Any] = '''Wav2Vec2CTCTokenizer'''
return config
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a=None , __a=None , __a=True ):
if is_finetuned:
snake_case_ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
snake_case_ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
snake_case_ : List[str] = SEWConfig.from_pretrained(snake_case__ )
else:
snake_case_ : Dict = convert_config(model[0] , snake_case__ )
snake_case_ : str = model[0].eval()
snake_case_ : Union[str, Any] = True if config.feat_extract_norm == '''layer''' else False
snake_case_ : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , )
if is_finetuned:
if dict_path:
snake_case_ : Optional[Any] = Dictionary.load(snake_case__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
snake_case_ : Optional[Any] = target_dict.pad_index
snake_case_ : Optional[Any] = target_dict.bos_index
snake_case_ : Union[str, Any] = target_dict.pad_index
snake_case_ : Union[str, Any] = target_dict.bos_index
snake_case_ : int = target_dict.eos_index
snake_case_ : Tuple = len(target_dict.symbols )
snake_case_ : List[str] = os.path.join(snake_case__ , 'vocab.json' )
if not os.path.isdir(snake_case__ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(snake_case__ ) )
return
os.makedirs(snake_case__ , exist_ok=snake_case__ )
with open(snake_case__ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices , snake_case__ )
snake_case_ : str = WavaVecaCTCTokenizer(
snake_case__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=snake_case__ , )
snake_case_ : List[str] = WavaVecaProcessor(feature_extractor=snake_case__ , tokenizer=snake_case__ )
processor.save_pretrained(snake_case__ )
snake_case_ : Tuple = SEWForCTC(snake_case__ )
else:
snake_case_ : Dict = SEWModel(snake_case__ )
feature_extractor.save_pretrained(snake_case__ )
recursively_load_weights(snake_case__ , snake_case__ , snake_case__ )
hf_model.save_pretrained(snake_case__ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 327 |
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
lowercase : str = logging.get_logger(__name__)
@add_end_docstrings(__snake_case )
class A ( __snake_case ):
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
self.check_model_type(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A, A : Dict = {}, {}
if padding is not None:
A : List[str] = padding
if truncation is not None:
A : Dict = truncation
if top_k is not None:
A : Optional[Any] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE , (Image.Image, str) ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A : int = {'''image''': image, '''question''': question}
else:
A : Any = image
A : Any = super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
return results
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> Any:
"""simple docstring"""
A : Union[str, Any] = load_image(inputs['''image'''] )
A : Optional[Any] = self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE )
A : Dict = self.image_processor(images=SCREAMING_SNAKE_CASE , return_tensors=self.framework )
model_inputs.update(SCREAMING_SNAKE_CASE )
return model_inputs
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
A : List[Any] = self.model(**SCREAMING_SNAKE_CASE )
return model_outputs
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=5 ) -> int:
"""simple docstring"""
if top_k > self.model.config.num_labels:
A : Dict = self.model.config.num_labels
if self.framework == "pt":
A : Optional[int] = model_outputs.logits.sigmoid()[0]
A, A : int = probs.topk(SCREAMING_SNAKE_CASE )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
A : int = scores.tolist()
A : List[str] = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )]
| 3 | 0 |
"""simple docstring"""
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Dict = len(snake_case__ )
_a : Dict = [[0] * n for i in range(snake_case__ )]
for i in range(snake_case__ ):
_a : int = y_points[i]
for i in range(2 , snake_case__ ):
for j in range(snake_case__ , snake_case__ ):
_a : Tuple = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 294 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase : Union[str, Any] = logging.get_logger(__name__)
lowercase : str = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class A ( __snake_case ):
__magic_name__ = '''bert'''
def __init__( self , SCREAMING_SNAKE_CASE=30522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
A : Optional[int] = vocab_size
A : Optional[Any] = hidden_size
A : List[Any] = num_hidden_layers
A : List[str] = num_attention_heads
A : Dict = hidden_act
A : Optional[Any] = intermediate_size
A : List[Any] = hidden_dropout_prob
A : List[Any] = attention_probs_dropout_prob
A : Optional[Any] = max_position_embeddings
A : List[str] = type_vocab_size
A : Dict = initializer_range
A : str = layer_norm_eps
A : int = position_embedding_type
A : Dict = use_cache
A : str = classifier_dropout
class A ( __snake_case ):
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
A : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
A : Optional[int] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 3 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_a : str = logging.get_logger(__name__)
_a : Optional[Any] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Tuple ,_lowerCamelCase : Tuple ,_lowerCamelCase : Any ,_lowerCamelCase : int ) -> List[Any]:
for attribute in key.split(""".""" ):
_lowerCAmelCase : Dict = getattr(snake_case__ ,snake_case__ )
if weight_type is not None:
_lowerCAmelCase : Dict = getattr(snake_case__ ,snake_case__ ).shape
else:
_lowerCAmelCase : str = hf_pointer.shape
assert hf_shape == value.shape, (
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
_lowerCAmelCase : List[str] = value
elif weight_type == "weight_g":
_lowerCAmelCase : str = value
elif weight_type == "weight_v":
_lowerCAmelCase : Optional[Any] = value
elif weight_type == "bias":
_lowerCAmelCase : List[str] = value
else:
_lowerCAmelCase : Tuple = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : int ,_lowerCamelCase : Dict ) -> Optional[int]:
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Tuple = fairseq_model.state_dict()
_lowerCAmelCase : Union[str, Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_lowerCAmelCase : Any = False
if "conv_layers" in name:
load_conv_layer(
snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,hf_model.config.feat_extract_norm == """group""" ,)
_lowerCAmelCase : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
_lowerCAmelCase : List[str] = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
_lowerCAmelCase : str = True
if "*" in mapped_key:
_lowerCAmelCase : str = name.split(snake_case__ )[0].split(""".""" )[-2]
_lowerCAmelCase : int = mapped_key.replace("""*""" ,snake_case__ )
if "weight_g" in name:
_lowerCAmelCase : List[str] = '''weight_g'''
elif "weight_v" in name:
_lowerCAmelCase : str = '''weight_v'''
elif "weight" in name:
_lowerCAmelCase : List[str] = '''weight'''
elif "bias" in name:
_lowerCAmelCase : Optional[Any] = '''bias'''
else:
_lowerCAmelCase : Optional[Any] = None
set_recursively(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
continue
if not is_used:
unused_weights.append(snake_case__ )
logger.warning(f"Unused weights: {unused_weights}" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : int ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : List[Any] ) -> Any:
_lowerCAmelCase : Optional[int] = full_name.split("""conv_layers.""" )[-1]
_lowerCAmelCase : Dict = name.split(""".""" )
_lowerCAmelCase : Union[str, Any] = int(items[0] )
_lowerCAmelCase : str = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
_lowerCAmelCase : int = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
_lowerCAmelCase : Any = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
_lowerCAmelCase : Union[str, Any] = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
_lowerCAmelCase : Tuple = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(snake_case__ )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Dict ,_lowerCamelCase : Any=None ,_lowerCamelCase : Dict=None ,_lowerCamelCase : Union[str, Any]=True ) -> Optional[Any]:
if config_path is not None:
_lowerCAmelCase : Tuple = HubertConfig.from_pretrained(snake_case__ )
else:
_lowerCAmelCase : Dict = HubertConfig()
if is_finetuned:
if dict_path:
_lowerCAmelCase : Any = Dictionary.load(snake_case__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCAmelCase : str = target_dict.pad_index
_lowerCAmelCase : Optional[int] = target_dict.bos_index
_lowerCAmelCase : Optional[int] = target_dict.eos_index
_lowerCAmelCase : Optional[int] = len(target_dict.symbols )
_lowerCAmelCase : Union[str, Any] = os.path.join(snake_case__ ,"""vocab.json""" )
if not os.path.isdir(snake_case__ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(snake_case__ ) )
return
os.makedirs(snake_case__ ,exist_ok=snake_case__ )
with open(snake_case__ ,"""w""" ,encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices ,snake_case__ )
_lowerCAmelCase : Optional[int] = WavaVecaCTCTokenizer(
snake_case__ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="""|""" ,do_lower_case=snake_case__ ,)
_lowerCAmelCase : Optional[Any] = True if config.feat_extract_norm == '''layer''' else False
_lowerCAmelCase : List[str] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16000 ,padding_value=0 ,do_normalize=snake_case__ ,return_attention_mask=snake_case__ ,)
_lowerCAmelCase : Dict = WavaVecaProcessor(feature_extractor=snake_case__ ,tokenizer=snake_case__ )
processor.save_pretrained(snake_case__ )
_lowerCAmelCase : Union[str, Any] = HubertForCTC(snake_case__ )
else:
_lowerCAmelCase : List[str] = HubertModel(snake_case__ )
if is_finetuned:
_lowerCAmelCase : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
_lowerCAmelCase : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_lowerCAmelCase : int = model[0].eval()
recursively_load_weights(snake_case__ ,snake_case__ ,snake_case__ )
hf_wavavec.save_pretrained(snake_case__ )
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
_a : Dict = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 44 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
A : str = BeautifulSoup(requests.get(snake_case__ , params=snake_case__ ).content , '''html.parser''' )
A : Dict = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
A : Optional[int] = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
return anchors[2].get_text()
if __name__ == "__main__":
lowercase : str = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 20_18,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 3 | 0 |
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Any = len(snake_case__ )
for i in range(n - 1 ):
for j in range(i + 1 , snake_case__ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def A ( _lowercase ):
if len(snake_case__ ) <= 1:
return arr, 0
SCREAMING_SNAKE_CASE : Any = len(snake_case__ ) // 2
SCREAMING_SNAKE_CASE : List[Any] = arr[0:mid]
SCREAMING_SNAKE_CASE : Union[str, Any] = arr[mid:]
SCREAMING_SNAKE_CASE : List[str] = count_inversions_recursive(snake_case__ )
SCREAMING_SNAKE_CASE : Dict = count_inversions_recursive(snake_case__ )
SCREAMING_SNAKE_CASE : Dict = _count_cross_inversions(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE : Optional[int] = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def A ( _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : Tuple = []
SCREAMING_SNAKE_CASE : Optional[Any] = 0
while i < len(snake_case__ ) and j < len(snake_case__ ):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(snake_case__ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(snake_case__ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def A ( ):
SCREAMING_SNAKE_CASE : Tuple = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
SCREAMING_SNAKE_CASE : List[Any] = count_inversions_bf(snake_case__ )
SCREAMING_SNAKE_CASE : int = count_inversions_recursive(snake_case__ )
assert num_inversions_bf == num_inversions_recursive == 8
print('''number of inversions = ''' , snake_case__ )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
SCREAMING_SNAKE_CASE : Tuple = count_inversions_bf(snake_case__ )
SCREAMING_SNAKE_CASE : Optional[int] = count_inversions_recursive(snake_case__ )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , snake_case__ )
# an empty list should also have zero inversions
SCREAMING_SNAKE_CASE : int = []
SCREAMING_SNAKE_CASE : List[Any] = count_inversions_bf(snake_case__ )
SCREAMING_SNAKE_CASE : Optional[int] = count_inversions_recursive(snake_case__ )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , snake_case__ )
if __name__ == "__main__":
main()
| 182 |
'''simple docstring'''
class A :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A : Any = None
A : Optional[Any] = None
A : Tuple = graph
self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Dict = len(SCREAMING_SNAKE_CASE )
A : Optional[Any] = None
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
if sources is int:
A : Dict = [sources]
if sinks is int:
A : str = [sinks]
if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0:
return
A : Optional[int] = sources[0]
A : Union[str, Any] = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1:
A : Optional[int] = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
A : Dict = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
A : Dict = max_input_flow
A : Tuple = 0
A : Tuple = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
A : Optional[Any] = max_input_flow
A : Optional[Any] = size - 1
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
if self.maximum_flow_algorithm is None:
raise Exception('''You need to set maximum flow algorithm before.''' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
A : List[Any] = algorithm(self )
class A :
def __init__( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
A : Union[str, Any] = flow_network
A : Optional[Any] = flow_network.verticesCount
A : Tuple = flow_network.sourceIndex
A : Dict = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
A : str = flow_network.graph
A : Optional[Any] = False
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
if not self.executed:
self._algorithm()
A : Optional[int] = True
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
pass
class A ( __snake_case ):
def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
super().__init__(SCREAMING_SNAKE_CASE )
# use this to save your result
A : List[str] = -1
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
if not self.executed:
raise Exception('''You should execute algorithm before using its result!''' )
return self.maximum_flow
class A ( __snake_case ):
def __init__( self , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
super().__init__(SCREAMING_SNAKE_CASE )
A : Optional[Any] = [[0] * self.verticies_count for i in range(self.verticies_count )]
A : Union[str, Any] = [0] * self.verticies_count
A : List[Any] = [0] * self.verticies_count
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Tuple = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
A : Optional[Any] = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
A : Union[str, Any] = 0
while i < len(SCREAMING_SNAKE_CASE ):
A : str = vertices_list[i]
A : List[str] = self.heights[vertex_index]
self.process_vertex(SCREAMING_SNAKE_CASE )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) )
A : int = 0
else:
i += 1
A : Optional[Any] = sum(self.preflow[self.source_index] )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.relabel(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
A : Dict = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
A : Dict = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
A : Dict = self.heights[to_index]
if min_height is not None:
A : Dict = min_height + 1
if __name__ == "__main__":
lowercase : Optional[int] = [0]
lowercase : List[Any] = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
lowercase : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
lowercase : List[str] = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
lowercase : List[str] = flow_network.find_maximum_flow()
print(f'''maximum flow is {maximum_flow}''')
| 3 | 0 |
'''simple docstring'''
def snake_case_ ( __SCREAMING_SNAKE_CASE : Dict = 10 ):
"""simple docstring"""
if not isinstance(snake_case__ , snake_case__ ) or n < 0:
raise ValueError('''Invalid input''' )
lowercase_ : List[str] = 10**n
lowercase_ : Tuple = 28433 * (pow(2 , 7830457 , snake_case__ )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(1_0) = }""")
| 93 |
'''simple docstring'''
def lowerCAmelCase_ ( snake_case__ = 10 ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ) or n < 0:
raise ValueError('''Invalid input''' )
A : List[str] = 10**n
A : Tuple = 2_8433 * (pow(2 , 783_0457 , snake_case__ )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(10) = }''')
| 3 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
__lowerCamelCase : Tuple = logging.get_logger(__name__)
__lowerCamelCase : Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__lowerCamelCase : Union[str, Any] = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
__lowerCamelCase : List[str] = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
__lowerCamelCase : List[str] = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class __snake_case ( __snake_case ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = SqueezeBertTokenizer
def __init__( self : List[str] , _lowercase : Optional[int]=None , _lowercase : Union[str, Any]=None , _lowercase : int=True , _lowercase : str="[UNK]" , _lowercase : int="[SEP]" , _lowercase : Dict="[PAD]" , _lowercase : Union[str, Any]="[CLS]" , _lowercase : List[Any]="[MASK]" , _lowercase : List[str]=True , _lowercase : Optional[int]=None , **_lowercase : List[str] , ):
"""simple docstring"""
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , _lowercase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , _lowercase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , _lowercase ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE__ = getattr(_lowercase , normalizer_state.pop("""type""" ) )
SCREAMING_SNAKE_CASE__ = do_lower_case
SCREAMING_SNAKE_CASE__ = strip_accents
SCREAMING_SNAKE_CASE__ = tokenize_chinese_chars
SCREAMING_SNAKE_CASE__ = normalizer_class(**_lowercase )
SCREAMING_SNAKE_CASE__ = do_lower_case
def __a ( self : Union[str, Any] , _lowercase : str , _lowercase : Tuple=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self : Dict , _lowercase : Optional[Any] , _lowercase : Union[str, Any] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self : Union[str, Any] , _lowercase : List[str] , _lowercase : Any = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
| 219 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowercase : List[str] = logging.get_logger(__name__)
lowercase : str = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class A ( __snake_case ):
__magic_name__ = '''gpt_neo'''
__magic_name__ = ['''past_key_values''']
__magic_name__ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self , SCREAMING_SNAKE_CASE=50257 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=24 , SCREAMING_SNAKE_CASE=[[["global", "local"], 12]] , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=50256 , SCREAMING_SNAKE_CASE=50256 , **SCREAMING_SNAKE_CASE , ) -> int:
"""simple docstring"""
A : Union[str, Any] = vocab_size
A : Optional[Any] = max_position_embeddings
A : Dict = hidden_size
A : Optional[Any] = num_layers
A : Tuple = num_heads
A : int = intermediate_size
A : Optional[Any] = window_size
A : List[Any] = activation_function
A : Union[str, Any] = resid_dropout
A : Any = embed_dropout
A : List[Any] = attention_dropout
A : str = classifier_dropout
A : List[Any] = layer_norm_epsilon
A : str = initializer_range
A : List[str] = use_cache
A : Optional[int] = bos_token_id
A : List[Any] = eos_token_id
A : int = attention_types
A : int = self.expand_attention_types_params(SCREAMING_SNAKE_CASE )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.attention_layers)` == `config.num_layers` '''
F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, '
F'`config.num_layers = {self.num_layers}`. '
'''`config.attention_layers` is prepared using `config.attention_types`. '''
'''Please verify the value of `config.attention_types` argument.''' )
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@staticmethod
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
A : List[str] = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
import torch
A : Tuple = input.size()
A : Union[str, Any] = len(snake_case__ )
A : List[str] = shape[dimension]
A : Union[str, Any] = torch.arange(0 , snake_case__ , snake_case__ )
A : List[str] = torch.div(sizedim - size , snake_case__ , rounding_mode='''floor''' ) + 1
A : Optional[int] = torch.arange(snake_case__ ) + low_indices[:min_length][:, None]
A : str = [slice(snake_case__ )] * rank
A : List[Any] = indices
A : Union[str, Any] = input[s]
A : List[str] = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(snake_case__ )
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
import torch
A : List[str] = torch.arange(1 , snake_case__ )
A : Optional[int] = torch.remainder(snake_case__ , snake_case__ )
A : Optional[int] = remainders == 0
A : Optional[Any] = candidates[divisor_indices]
A : Optional[int] = torch.max(snake_case__ )
return largest_divisor, torch.div(snake_case__ , snake_case__ , rounding_mode='''floor''' )
class A ( __snake_case ):
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
A : Tuple = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE , direction='''inputs''' )
A : Optional[Any] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
A : Dict = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
return self._config.num_heads
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
"""simple docstring"""
A : List[str] = super(SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs(
SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE )
# We need to order the input in the way they appears in the forward()
A : Any = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
A, A : Dict = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
A : str = seqlen + 2
A : List[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
A : Any = [
(torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
A : str = common_inputs['''attention_mask''']
if self.use_past:
A : Optional[int] = ordered_inputs['''attention_mask'''].dtype
A : List[str] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )] , dim=1 )
return ordered_inputs
@property
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
return 13
| 3 | 0 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCamelCase__ ( __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase_ = filter(lambda __lowerCAmelCase : p.requires_grad , model.parameters() )
lowerCAmelCase_ = sum([np.prod(p.size() ) for p in model_parameters] )
return params
_A = logging.getLogger(__name__)
def lowerCamelCase__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if metric == "rouge2":
lowerCAmelCase_ = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
lowerCAmelCase_ = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
lowerCAmelCase_ = '''{val_avg_em:.4f}-{step_count}'''
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
" function." )
lowerCAmelCase_ = ModelCheckpoint(
dirpath=snake_case__ , filename=snake_case__ , monitor=F"""val_{metric}""" , mode="max" , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def lowerCamelCase__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] ):
"""simple docstring"""
return EarlyStopping(
monitor=F"""val_{metric}""" , mode="min" if "loss" in metric else "max" , patience=snake_case__ , verbose=snake_case__ , )
class _lowerCAmelCase ( pl.Callback ):
def __a ( self , _UpperCamelCase , _UpperCamelCase ) -> Tuple:
lowerCAmelCase_ = {f"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_UpperCamelCase )
@rank_zero_only
def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=True ) -> None:
logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
lowerCAmelCase_ = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
lowerCAmelCase_ = Path(pl_module.hparams.output_dir )
if type_path == "test":
lowerCAmelCase_ = od / '''test_results.txt'''
lowerCAmelCase_ = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
lowerCAmelCase_ = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
lowerCAmelCase_ = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=_UpperCamelCase )
generations_file.parent.mkdir(exist_ok=_UpperCamelCase )
with open(_UpperCamelCase , "a+" ) as writer:
for key in sorted(_UpperCamelCase ):
if key in ["log", "progress_bar", "preds"]:
continue
lowerCAmelCase_ = metrics[key]
if isinstance(_UpperCamelCase , torch.Tensor ):
lowerCAmelCase_ = val.item()
lowerCAmelCase_ = f"""{key}: {val:.6f}\n"""
writer.write(_UpperCamelCase )
if not save_generations:
return
if "preds" in metrics:
lowerCAmelCase_ = '''\n'''.join(metrics["preds"] )
generations_file.open("w+" ).write(_UpperCamelCase )
@rank_zero_only
def __a ( self , _UpperCamelCase , _UpperCamelCase ) -> List[str]:
try:
lowerCAmelCase_ = pl_module.model.model.num_parameters()
except AttributeError:
lowerCAmelCase_ = pl_module.model.num_parameters()
lowerCAmelCase_ = count_trainable_parameters(_UpperCamelCase )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
@rank_zero_only
def __a ( self , _UpperCamelCase , _UpperCamelCase ) -> Tuple:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_UpperCamelCase , _UpperCamelCase , "test" )
@rank_zero_only
def __a ( self , _UpperCamelCase , _UpperCamelCase ) -> Any:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 231 |
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class A ( nn.Module ):
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 0.0
__magic_name__ = 1
__magic_name__ = 1
__magic_name__ = True
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = jnp.floataa
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Union[str, Any] = []
A : Union[str, Any] = []
for i in range(self.num_layers ):
A : Any = self.in_channels if i == 0 else self.out_channels
A : Optional[Any] = FlaxResnetBlockaD(
in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(SCREAMING_SNAKE_CASE )
A : Optional[int] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(SCREAMING_SNAKE_CASE )
A : Union[str, Any] = resnets
A : Union[str, Any] = attentions
if self.add_downsample:
A : int = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Union[str, Any]:
"""simple docstring"""
A : Optional[Any] = ()
for resnet, attn in zip(self.resnets , self.attentions ):
A : int = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE )
A : Dict = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
if self.add_downsample:
A : Optional[Any] = self.downsamplers_a(SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
return hidden_states, output_states
class A ( nn.Module ):
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 0.0
__magic_name__ = 1
__magic_name__ = True
__magic_name__ = jnp.floataa
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : Optional[Any] = []
for i in range(self.num_layers ):
A : Optional[Any] = self.in_channels if i == 0 else self.out_channels
A : List[str] = FlaxResnetBlockaD(
in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(SCREAMING_SNAKE_CASE )
A : Dict = resnets
if self.add_downsample:
A : Dict = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]:
"""simple docstring"""
A : str = ()
for resnet in self.resnets:
A : Optional[int] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
if self.add_downsample:
A : Optional[int] = self.downsamplers_a(SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
return hidden_states, output_states
class A ( nn.Module ):
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 0.0
__magic_name__ = 1
__magic_name__ = 1
__magic_name__ = True
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = jnp.floataa
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Optional[Any] = []
A : Optional[int] = []
for i in range(self.num_layers ):
A : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels
A : Dict = self.prev_output_channel if i == 0 else self.out_channels
A : List[str] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(SCREAMING_SNAKE_CASE )
A : int = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(SCREAMING_SNAKE_CASE )
A : Dict = resnets
A : Optional[Any] = attentions
if self.add_upsample:
A : Optional[int] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[int]:
"""simple docstring"""
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
A : List[str] = res_hidden_states_tuple[-1]
A : int = res_hidden_states_tuple[:-1]
A : List[str] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE )
A : Tuple = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE )
if self.add_upsample:
A : Dict = self.upsamplers_a(SCREAMING_SNAKE_CASE )
return hidden_states
class A ( nn.Module ):
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 0.0
__magic_name__ = 1
__magic_name__ = True
__magic_name__ = jnp.floataa
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : int = []
for i in range(self.num_layers ):
A : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
A : List[str] = self.prev_output_channel if i == 0 else self.out_channels
A : str = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(SCREAMING_SNAKE_CASE )
A : List[Any] = resnets
if self.add_upsample:
A : Optional[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Tuple:
"""simple docstring"""
for resnet in self.resnets:
# pop res hidden states
A : Optional[int] = res_hidden_states_tuple[-1]
A : Optional[Any] = res_hidden_states_tuple[:-1]
A : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
A : Optional[Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE )
if self.add_upsample:
A : List[str] = self.upsamplers_a(SCREAMING_SNAKE_CASE )
return hidden_states
class A ( nn.Module ):
__magic_name__ = 42
__magic_name__ = 0.0
__magic_name__ = 1
__magic_name__ = 1
__magic_name__ = False
__magic_name__ = False
__magic_name__ = jnp.floataa
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : str = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
A : List[Any] = []
for _ in range(self.num_layers ):
A : int = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(SCREAMING_SNAKE_CASE )
A : Union[str, Any] = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(SCREAMING_SNAKE_CASE )
A : List[str] = resnets
A : List[str] = attentions
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Dict:
"""simple docstring"""
A : Optional[Any] = self.resnets[0](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
A : Optional[int] = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE )
A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE )
return hidden_states
| 3 | 0 |
'''simple docstring'''
from collections import deque
class __lowerCAmelCase :
'''simple docstring'''
def __init__(self : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = process_name # process name
lowercase__ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
lowercase__ = arrival_time
lowercase__ = burst_time # remaining burst time
lowercase__ = 0 # total time of the process wait in ready queue
lowercase__ = 0 # time from arrival time to completion time
class __lowerCAmelCase :
'''simple docstring'''
def __init__(self : List[str] , UpperCamelCase : Any , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : Tuple , ):
'''simple docstring'''
lowercase__ = number_of_queues
# time slice of queues that round robin algorithm applied
lowercase__ = time_slices
# unfinished process is in this ready_queue
lowercase__ = queue
# current time
lowercase__ = current_time
# finished process is in this sequence queue
lowercase__ = deque()
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
lowercase__ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def UpperCamelCase__ (self : List[Any] , UpperCamelCase : Dict ):
'''simple docstring'''
lowercase__ = []
for i in range(len(UpperCamelCase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def UpperCamelCase__ (self : Dict , UpperCamelCase : str ):
'''simple docstring'''
lowercase__ = []
for i in range(len(UpperCamelCase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def UpperCamelCase__ (self : Dict , UpperCamelCase : Dict ):
'''simple docstring'''
lowercase__ = []
for i in range(len(UpperCamelCase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
return [q.burst_time for q in queue]
def UpperCamelCase__ (self : List[str] , UpperCamelCase : List[Any] ):
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def UpperCamelCase__ (self : Dict , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ = deque() # sequence deque of finished process
while len(UpperCamelCase ) != 0:
lowercase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(UpperCamelCase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
lowercase__ = 0
# set the process's turnaround time because it is finished
lowercase__ = self.current_time - cp.arrival_time
# set the completion time
lowercase__ = self.current_time
# add the process to queue that has finished queue
finished.append(UpperCamelCase )
self.finish_queue.extend(UpperCamelCase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Tuple ):
'''simple docstring'''
lowercase__ = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(UpperCamelCase ) ):
lowercase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(UpperCamelCase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
lowercase__ = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(UpperCamelCase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
lowercase__ = 0
# set the finish time
lowercase__ = self.current_time
# update the process' turnaround time because it is finished
lowercase__ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(UpperCamelCase )
self.finish_queue.extend(UpperCamelCase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
for i in range(self.number_of_queues - 1 ):
lowercase__ = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
lowerCamelCase : Union[str, Any] = Process('P1', 0, 53)
lowerCamelCase : Tuple = Process('P2', 0, 17)
lowerCamelCase : Optional[Any] = Process('P3', 0, 68)
lowerCamelCase : int = Process('P4', 0, 24)
lowerCamelCase : Tuple = 3
lowerCamelCase : Dict = [17, 25]
lowerCamelCase : Any = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
lowerCamelCase : Optional[int] = Process('P1', 0, 53)
lowerCamelCase : Optional[Any] = Process('P2', 0, 17)
lowerCamelCase : Union[str, Any] = Process('P3', 0, 68)
lowerCamelCase : List[Any] = Process('P4', 0, 24)
lowerCamelCase : Optional[Any] = 3
lowerCamelCase : Union[str, Any] = [17, 25]
lowerCamelCase : Optional[int] = deque([Pa, Pa, Pa, Pa])
lowerCamelCase : str = MLFQ(number_of_queues, time_slices, queue, 0)
lowerCamelCase : Optional[Any] = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
| 2 |
'''simple docstring'''
import os
def lowerCAmelCase_ ( ):
'''simple docstring'''
A : List[Any] = os.path.join(os.path.dirname(snake_case__ ) , '''num.txt''' )
with open(snake_case__ ) as file_hand:
return str(sum(int(snake_case__ ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 3 | 0 |
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> int:
if not isinstance(snake_case__ , snake_case__ ):
raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
if len(snake_case__ ) == 0:
raise ValueError('''Input list must be a non empty list''' )
if len(snake_case__ ) == 1:
return True
lowerCAmelCase = series[1] - series[0]
for index in range(len(snake_case__ ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Optional[Any]:
if not isinstance(snake_case__ , snake_case__ ):
raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' )
if len(snake_case__ ) == 0:
raise ValueError('''Input list must be a non empty list''' )
lowerCAmelCase = 0
for val in series:
answer += val
return answer / len(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 338 |
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] )
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , '''IN_MEMORY_MAX_SIZE''' , snake_case__ )
A : Dict = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
A : Dict = dataset_size < in_memory_max_size
else:
A : Tuple = False
A : int = is_small_dataset(snake_case__ )
assert result == expected
| 3 | 0 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
__snake_case : Optional[int] = HfArgumentParser(InitializationArguments)
__snake_case : Union[str, Any] = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
__snake_case : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
__snake_case : Optional[int] = {
'vocab_size': len(tokenizer),
'scale_attn_by_inverse_layer_idx': True,
'reorder_and_upcast_attn': True,
}
# Load model config (GPT-2 large in this case)
__snake_case : Dict = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
__snake_case : str = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 248 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class A ( __snake_case ):
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 3 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> List[str]:
lowercase : Optional[int] = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class __snake_case ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
_a : List[str]= StableDiffusionLatentUpscalePipeline
_a : Optional[Any]= TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"height",
"width",
"cross_attention_kwargs",
"negative_prompt_embeds",
"prompt_embeds",
}
_a : Dict= PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
_a : Optional[int]= TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_a : List[Any]= frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_a : Dict= frozenset([] )
_a : List[Any]= True
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Any = 1
lowercase : Optional[int] = 4
lowercase : str = (16, 16)
lowercase : str = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(snake_case )
return image
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase : List[str] = UNetaDConditionModel(
act_fn="""gelu""" ,attention_head_dim=8 ,norm_num_groups=snake_case ,block_out_channels=[32, 32, 64, 64] ,time_cond_proj_dim=160 ,conv_in_kernel=1 ,conv_out_kernel=1 ,cross_attention_dim=32 ,down_block_types=(
"""KDownBlock2D""",
"""KCrossAttnDownBlock2D""",
"""KCrossAttnDownBlock2D""",
"""KCrossAttnDownBlock2D""",
) ,in_channels=8 ,mid_block_type=snake_case ,only_cross_attention=snake_case ,out_channels=5 ,resnet_time_scale_shift="""scale_shift""" ,time_embedding_type="""fourier""" ,timestep_post_act="""gelu""" ,up_block_types=("""KCrossAttnUpBlock2D""", """KCrossAttnUpBlock2D""", """KCrossAttnUpBlock2D""", """KUpBlock2D""") ,)
lowercase : List[str] = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=[
"""DownEncoderBlock2D""",
"""DownEncoderBlock2D""",
"""DownEncoderBlock2D""",
"""DownEncoderBlock2D""",
] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
lowercase : Optional[int] = EulerDiscreteScheduler(prediction_type="""sample""" )
lowercase : Optional[int] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act="""quick_gelu""" ,projection_dim=512 ,)
lowercase : List[str] = CLIPTextModel(snake_case )
lowercase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase : str = {
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case=0 ):
'''simple docstring'''
if str(snake_case ).startswith("""mps""" ):
lowercase : List[Any] = torch.manual_seed(snake_case )
else:
lowercase : Optional[Any] = torch.Generator(device=snake_case ).manual_seed(snake_case )
lowercase : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[str] = '''cpu'''
lowercase : str = self.get_dummy_components()
lowercase : List[Any] = self.pipeline_class(**snake_case )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
lowercase : str = self.get_dummy_inputs(snake_case )
lowercase : Dict = pipe(**snake_case ).images
lowercase : Union[str, Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape ,(1, 256, 256, 3) )
lowercase : Union[str, Any] = np.array(
[0.47_222_412, 0.41_921_633, 0.44_717_434, 0.46_874_192, 0.42_588_258, 0.46_150_726, 0.4_677_534, 0.45_583_832, 0.48_579_055] )
lowercase : int = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case ,1e-3 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3e-3 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = [
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
lowercase : List[str] = self.get_dummy_components()
lowercase : List[Any] = self.pipeline_class(**snake_case )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=snake_case )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
lowercase : Tuple = self.get_dummy_inputs(snake_case )
lowercase : Tuple = 2
lowercase : List[str] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
lowercase : Dict = getattr(snake_case ,scheduler_enum.name )
lowercase : int = scheduler_cls.from_config(pipe.scheduler.config )
lowercase : Optional[int] = pipe(**snake_case )[0]
outputs.append(snake_case )
assert check_same_shape(snake_case )
@require_torch_gpu
@slow
class __snake_case ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[Any] = torch.manual_seed(33 )
lowercase : Any = StableDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" ,torch_dtype=torch.floataa )
pipe.to("""cuda""" )
lowercase : str = StableDiffusionLatentUpscalePipeline.from_pretrained(
"""stabilityai/sd-x2-latent-upscaler""" ,torch_dtype=torch.floataa )
upscaler.to("""cuda""" )
lowercase : List[str] = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
lowercase : str = pipe(snake_case ,generator=snake_case ,output_type="""latent""" ).images
lowercase : Union[str, Any] = upscaler(
prompt=snake_case ,image=snake_case ,num_inference_steps=20 ,guidance_scale=0 ,generator=snake_case ,output_type="""np""" ,).images[0]
lowercase : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy""" )
assert np.abs((expected_image - image).mean() ) < 5e-2
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : int = torch.manual_seed(33 )
lowercase : int = StableDiffusionLatentUpscalePipeline.from_pretrained(
"""stabilityai/sd-x2-latent-upscaler""" ,torch_dtype=torch.floataa )
upscaler.to("""cuda""" )
lowercase : List[str] = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
lowercase : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png""" )
lowercase : Optional[int] = upscaler(
prompt=snake_case ,image=snake_case ,num_inference_steps=20 ,guidance_scale=0 ,generator=snake_case ,output_type="""np""" ,).images[0]
lowercase : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy""" )
assert np.abs((expected_image - image).max() ) < 5e-2
| 20 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
lowercase : Optional[int] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
lowercase : Optional[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
lowercase : str = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]:
"""simple docstring"""
if return_pvalue:
A : Union[str, Any] = pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )}
| 3 | 0 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class SCREAMING_SNAKE_CASE_ ( __snake_case ):
__magic_name__: List[Any] = ["vqvae"]
def __init__( self : List[str] , _A : List[Any] , _A : List[Any] , _A : Dict , _A : List[Any] , ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(unet=_A , scheduler=_A , mel=_A , vqvae=_A )
def UpperCAmelCase_ ( self : Dict ) -> int:
"""simple docstring"""
return 50 if isinstance(self.scheduler , _A ) else 1000
@torch.no_grad()
def __call__( self : Optional[Any] , _A : Optional[Any] = 1 , _A : List[str] = None , _A : List[Any] = None , _A : List[Any] = 0 , _A : Tuple = 0 , _A : str = None , _A : Dict = None , _A : int = 0 , _A : Tuple = 0 , _A : Union[str, Any] = None , _A : Any = 0 , _A : str = None , _A : Tuple = None , _A : Any=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
"""simple docstring"""
snake_case_ : Optional[Any] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_A )
snake_case_ : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
snake_case_ : Union[str, Any] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
snake_case_ : int = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=_A , device=self.device , )
snake_case_ : Union[str, Any] = noise
snake_case_ : int = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_A , _A )
snake_case_ : Tuple = self.mel.audio_slice_to_image(_A )
snake_case_ : Optional[int] = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
snake_case_ : Tuple = (input_image / 255) * 2 - 1
snake_case_ : Union[str, Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
snake_case_ : Optional[int] = self.vqvae.encode(torch.unsqueeze(_A , 0 ) ).latent_dist.sample(
generator=_A )[0]
snake_case_ : Optional[Any] = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
snake_case_ : Union[str, Any] = self.scheduler.add_noise(_A , _A , self.scheduler.timesteps[start_step - 1] )
snake_case_ : Optional[Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
snake_case_ : str = int(mask_start_secs * pixels_per_second )
snake_case_ : int = int(mask_end_secs * pixels_per_second )
snake_case_ : List[Any] = self.scheduler.add_noise(_A , _A , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , _A ):
snake_case_ : List[str] = self.unet(_A , _A , _A )['''sample''']
else:
snake_case_ : int = self.unet(_A , _A )['''sample''']
if isinstance(self.scheduler , _A ):
snake_case_ : Any = self.scheduler.step(
model_output=_A , timestep=_A , sample=_A , eta=_A , generator=_A , )['''prev_sample''']
else:
snake_case_ : Optional[int] = self.scheduler.step(
model_output=_A , timestep=_A , sample=_A , generator=_A , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
snake_case_ : str = mask[:, step, :, :mask_start]
if mask_end > 0:
snake_case_ : List[str] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
snake_case_ : str = 1 / self.vqvae.config.scaling_factor * images
snake_case_ : Tuple = self.vqvae.decode(_A )['''sample''']
snake_case_ : Tuple = (images / 2 + 0.5).clamp(0 , 1 )
snake_case_ : Union[str, Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
snake_case_ : Optional[Any] = (images * 255).round().astype('uint8' )
snake_case_ : Optional[int] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_A , mode='RGB' ).convert('L' ) for _ in images) )
snake_case_ : Any = [self.mel.image_to_audio(_A ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_A )[:, np.newaxis, :] ) , **ImagePipelineOutput(_A ) )
@torch.no_grad()
def UpperCAmelCase_ ( self : str , _A : Tuple , _A : Dict = 50 ) -> np.ndarray:
"""simple docstring"""
assert isinstance(self.scheduler , _A )
self.scheduler.set_timesteps(_A )
snake_case_ : Union[str, Any] = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
snake_case_ : int = (sample / 255) * 2 - 1
snake_case_ : Union[str, Any] = torch.Tensor(_A ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
snake_case_ : Union[str, Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
snake_case_ : Union[str, Any] = self.scheduler.alphas_cumprod[t]
snake_case_ : Tuple = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
snake_case_ : List[Any] = 1 - alpha_prod_t
snake_case_ : Dict = self.unet(_A , _A )['''sample''']
snake_case_ : List[str] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
snake_case_ : List[Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
snake_case_ : Union[str, Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def UpperCAmelCase_ ( _A : List[Any] , _A : Optional[int] , _A : List[Any] ) -> torch.Tensor:
"""simple docstring"""
snake_case_ : List[str] = acos(torch.dot(torch.flatten(_A ) , torch.flatten(_A ) ) / torch.norm(_A ) / torch.norm(_A ) )
return sin((1 - alpha) * theta ) * xa / sin(_A ) + sin(alpha * theta ) * xa / sin(_A )
| 327 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowercase : Dict = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[Any] = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[str] = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Dict = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Any = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowercase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 3 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_snake_case = logging.get_logger(__name__)
class UpperCamelCase ( __snake_case ):
UpperCamelCase : List[str] = ['''pixel_values''']
def __init__( self : Tuple , UpperCAmelCase__ : Any = True , UpperCAmelCase__ : List[str] = None , UpperCAmelCase__ : Dict = None , UpperCAmelCase__ : Any = PILImageResampling.BILINEAR , UpperCAmelCase__ : List[str] = True , UpperCAmelCase__ : Dict = 1 / 255 , UpperCAmelCase__ : Any = True , UpperCAmelCase__ : Union[str, Any] = None , UpperCAmelCase__ : int = None , **UpperCAmelCase__ : List[str] , ) -> None:
super().__init__(**UpperCAmelCase__ )
_a : str = size if size is not None else {'''shortest_edge''': 384}
_a : Tuple = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
_a : str = do_resize
_a : List[Any] = size
# Default value set here for backwards compatibility where the value in config is None
_a : List[Any] = crop_pct if crop_pct is not None else 224 / 256
_a : Optional[int] = resample
_a : Union[str, Any] = do_rescale
_a : List[str] = rescale_factor
_a : Union[str, Any] = do_normalize
_a : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowercase ( self : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple = PILImageResampling.BICUBIC , UpperCAmelCase__ : Any = None , **UpperCAmelCase__ : Tuple , ) -> np.ndarray:
_a : str = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(f"""Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}""" )
_a : Any = size['''shortest_edge''']
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
_a : Dict = int(shortest_edge / crop_pct )
_a : str = get_resize_output_image_size(UpperCAmelCase__ , size=UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
_a : int = resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=UpperCAmelCase__ , size=(shortest_edge, shortest_edge) , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
UpperCAmelCase__ , size=(shortest_edge, shortest_edge) , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def _lowercase ( self : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] = None , **UpperCAmelCase__ : Union[str, Any] , ) -> List[str]:
return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] = None , **UpperCAmelCase__ : str , ) -> np.ndarray:
return normalize(UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any = None , UpperCAmelCase__ : Tuple = None , UpperCAmelCase__ : str = None , UpperCAmelCase__ : str = None , UpperCAmelCase__ : Optional[Any] = None , UpperCAmelCase__ : List[str] = None , UpperCAmelCase__ : List[Any] = None , UpperCAmelCase__ : Dict = None , UpperCAmelCase__ : List[str] = None , UpperCAmelCase__ : Any = None , UpperCAmelCase__ : List[str] = ChannelDimension.FIRST , **UpperCAmelCase__ : Tuple , ) -> PIL.Image.Image:
_a : int = do_resize if do_resize is not None else self.do_resize
_a : Tuple = crop_pct if crop_pct is not None else self.crop_pct
_a : Optional[Any] = resample if resample is not None else self.resample
_a : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
_a : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_a : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
_a : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
_a : List[str] = image_std if image_std is not None else self.image_std
_a : Union[str, Any] = size if size is not None else self.size
_a : List[Any] = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
_a : Any = make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_a : Optional[int] = [to_numpy_array(UpperCAmelCase__ ) for image in images]
if do_resize:
_a : Any = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , crop_pct=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
if do_rescale:
_a : str = [self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images]
if do_normalize:
_a : Dict = [self.normalize(image=UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ ) for image in images]
_a : Any = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
_a : Optional[int] = {'''pixel_values''': images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
| 294 |
'''simple docstring'''
import os
import sys
import unittest
lowercase : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowercase : Any = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
lowercase : Optional[int] = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : Tuple = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE )
A : Any = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE )
A : List[Any] = {'''BertModelTest''': '''BertModelTester'''}
A : int = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE )
A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE )
A : List[str] = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
A : Union[str, Any] = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : int = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE )
A : Union[str, Any] = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE )
A : Dict = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
A : str = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
| 3 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : Dict ) -> Optional[Any]:
_lowerCAmelCase : Optional[int] = namedtuple("""result""" ,"""name value""" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("""Only one argument must be 0""" )
elif power < 0:
raise ValueError(
"""Power cannot be negative in any electrical/electronics system""" )
elif voltage == 0:
return result("""voltage""" ,power / current )
elif current == 0:
return result("""current""" ,power / voltage )
elif power == 0:
return result("""power""" ,float(round(abs(voltage * current ) ,2 ) ) )
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class A ( __snake_case ):
__magic_name__ = DistilBertTokenizer
__magic_name__ = DistilBertTokenizerFast
__magic_name__ = True
@slow
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : List[Any] = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
A : Dict = tokenizer.encode('''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE )
A : List[str] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE )
A : Optional[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE )
A : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 3 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase__ ( __snake_case , unittest.TestCase):
UpperCamelCase_ = KandinskyVaaControlnetImgaImgPipeline
UpperCamelCase_ = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
UpperCamelCase_ = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
UpperCamelCase_ = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCamelCase_ = False
@property
def __A ( self : Dict ):
'''simple docstring'''
return 32
@property
def __A ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def __A ( self : List[str] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __A ( self : List[str] ):
'''simple docstring'''
return 100
@property
def __A ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE : List[str] = UNetaDConditionModel(**UpperCamelCase__ )
return model
@property
def __A ( self : Dict ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __A ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = VQModel(**self.dummy_movq_kwargs )
return model
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.dummy_unet
SCREAMING_SNAKE_CASE : Tuple = self.dummy_movq
SCREAMING_SNAKE_CASE : List[str] = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE : str = DDIMScheduler(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __A ( self : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCamelCase__ )
# create init_image
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : Tuple = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert('''RGB''' ).resize((256, 256) )
# create hint
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
if str(UpperCamelCase__ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : str = torch.manual_seed(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''cpu'''
SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[Any] = self.pipeline_class(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = output.images
SCREAMING_SNAKE_CASE : str = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : List[Any] = np.array(
[0.5498_5034, 0.5550_9365, 0.5256_1504, 0.557_0494, 0.559_3818, 0.526_3979, 0.5028_5643, 0.506_9846, 0.5119_6736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
SCREAMING_SNAKE_CASE : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE : List[Any] = init_image.resize((512, 512) )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(np.array(UpperCamelCase__ ) ).float() / 255.0
SCREAMING_SNAKE_CASE : Optional[int] = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
SCREAMING_SNAKE_CASE : int = '''A robot, 4k photo'''
SCREAMING_SNAKE_CASE : str = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : List[Any] = pipeline.to(UpperCamelCase__ )
pipeline.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe_prior(
UpperCamelCase__ , image=UpperCamelCase__ , strength=0.85 , generator=UpperCamelCase__ , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE : str = pipeline(
image=UpperCamelCase__ , image_embeds=UpperCamelCase__ , negative_image_embeds=UpperCamelCase__ , hint=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : str = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
| 182 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowercase : Optional[int] = logging.get_logger(__name__)
class A ( __snake_case ):
__magic_name__ = ['''input_features''', '''attention_mask''']
def __init__( self , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=16000 , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> int:
"""simple docstring"""
super().__init__(feature_size=SCREAMING_SNAKE_CASE , sampling_rate=SCREAMING_SNAKE_CASE , padding_value=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
A : Optional[int] = num_mel_bins
A : Tuple = do_ceptral_normalize
A : Dict = normalize_means
A : List[Any] = normalize_vars
A : List[str] = True
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
A : List[Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
A : Any = torch.from_numpy(SCREAMING_SNAKE_CASE ).unsqueeze(0 )
A : Any = ta_kaldi.fbank(SCREAMING_SNAKE_CASE , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 0.0 , ) -> np.ndarray:
"""simple docstring"""
if normalize_means:
A : Dict = x[:input_length].mean(axis=0 )
A : Optional[Any] = np.subtract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if normalize_vars:
A : str = x[:input_length].std(axis=0 )
A : int = np.divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if input_length < x.shape[0]:
A : List[str] = padding_value
# make sure array is in float32
A : Tuple = x.astype(np.floataa )
return x
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[np.ndarray]:
"""simple docstring"""
A : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
]
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
A : List[Any] = isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
A : Tuple = is_batched_numpy or (
isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ):
A : Union[str, Any] = np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A : Any = [raw_speech]
# extract fbank features
A : List[str] = [self._extract_fbank_features(SCREAMING_SNAKE_CASE ) for waveform in raw_speech]
# convert into correct format for padding
A : str = BatchFeature({'''input_features''': features} )
A : Union[str, Any] = self.pad(
SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
# make sure list is in array format
A : List[str] = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , SCREAMING_SNAKE_CASE ):
A : str = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features]
A : Union[str, Any] = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
A : Dict = (
np.array(SCREAMING_SNAKE_CASE , dtype=np.intaa )
if self._get_padding_strategies(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE ) is not PaddingStrategy.DO_NOT_PAD
else None
)
A : List[Any] = self.normalize(
padded_inputs['''input_features'''] , attention_mask=SCREAMING_SNAKE_CASE )
if return_tensors is not None:
A : int = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE )
return padded_inputs
| 3 | 0 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
_lowercase : Optional[int] = {
'169M': 1_2,
'430M': 2_4,
'1B5': 2_4,
'3B': 3_2,
'7B': 3_2,
'14B': 4_0,
}
_lowercase : Optional[Any] = {
'169M': 7_6_8,
'430M': 1_0_2_4,
'1B5': 2_0_4_8,
'3B': 2_5_6_0,
'7B': 4_0_9_6,
'14B': 5_1_2_0,
}
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : Optional[Any] = list(state_dict.keys() )
for name in state_dict_keys:
lowercase_ : str = state_dict.pop(snake_case__ )
# emb -> embedding
if name.startswith('''emb.''' ):
lowercase_ : Optional[Any] = name.replace('''emb.''' , '''embeddings.''' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('''blocks.0.ln0''' ):
lowercase_ : Union[str, Any] = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' )
# att -> attention
lowercase_ : int = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , snake_case__ )
# ffn -> feed_forward
lowercase_ : List[Any] = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , snake_case__ )
# time_mix_k -> time_mix_key and reshape
if name.endswith('''.time_mix_k''' ):
lowercase_ : List[str] = name.replace('''.time_mix_k''' , '''.time_mix_key''' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('''.time_mix_v''' ):
lowercase_ : Union[str, Any] = name.replace('''.time_mix_v''' , '''.time_mix_value''' )
# time_mix_r -> time_mix_key and reshape
if name.endswith('''.time_mix_r''' ):
lowercase_ : Union[str, Any] = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' )
if name != "head.weight":
lowercase_ : List[Any] = '''rwkv.''' + name
lowercase_ : Dict = weight
return state_dict
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : int=None ):
"""simple docstring"""
if tokenizer_file is None:
print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' )
lowercase_ : int = 50277
lowercase_ : Optional[int] = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' )
else:
lowercase_ : str = PreTrainedTokenizerFast(tokenizer_file=snake_case__ )
lowercase_ : Any = len(snake_case__ )
tokenizer.save_pretrained(snake_case__ )
# 2. Build the config
lowercase_ : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
lowercase_ : List[str] = candidate
break
if size is None:
raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' )
if size not in possible_sizes:
raise ValueError(F'''`size` should be one of {possible_sizes}, got {size}.''' )
lowercase_ : Any = RwkvConfig(
vocab_size=snake_case__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(snake_case__ )
# 3. Download model file then convert state_dict
lowercase_ : Union[str, Any] = hf_hub_download(snake_case__ , snake_case__ )
lowercase_ : Tuple = torch.load(snake_case__ , map_location='''cpu''' )
lowercase_ : List[Any] = convert_state_dict(snake_case__ )
# 4. Split in shards and save
lowercase_ : List[str] = shard_checkpoint(snake_case__ )
for shard_file, shard in shards.items():
torch.save(snake_case__ , os.path.join(snake_case__ , snake_case__ ) )
if index is not None:
lowercase_ : Dict = os.path.join(snake_case__ , snake_case__ )
# Save the index as well
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
lowercase_ : List[Any] = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '''\n'''
f.write(snake_case__ )
# 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
print(
'''Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.''' )
lowercase_ : List[Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
lowercase_ : Union[str, Any] = torch.load(os.path.join(snake_case__ , snake_case__ ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(snake_case__ , snake_case__ ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' )
lowercase_ : int = AutoModelForCausalLM.from_pretrained(snake_case__ )
model.push_to_hub(snake_case__ , max_shard_size='''2GB''' )
tokenizer.push_to_hub(snake_case__ )
if __name__ == "__main__":
_lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
_lowercase : Union[str, Any] = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 93 |
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
lowercase : str = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
lowercase : str = get_tests_dir('fixtures/vocab.json')
lowercase : int = get_tests_dir('fixtures')
class A ( unittest.TestCase ):
__magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : Tuple = 0
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : List[Any] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A : Union[str, Any] = WavaVecaConfig()
A : List[str] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) )
A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A : Dict = WavaVecaFeatureExtractor()
A : List[str] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# save in new folder
processor.save_pretrained(SCREAMING_SNAKE_CASE )
# drop `processor_class` in tokenizer
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f:
A : Dict = json.load(SCREAMING_SNAKE_CASE )
config_dict.pop('''processor_class''' )
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE ) )
A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A : List[Any] = WavaVecaFeatureExtractor()
A : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# save in new folder
processor.save_pretrained(SCREAMING_SNAKE_CASE )
# drop `processor_class` in feature extractor
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f:
A : str = json.load(SCREAMING_SNAKE_CASE )
config_dict.pop('''processor_class''' )
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE ) )
A : str = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A : str = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(SCREAMING_SNAKE_CASE )
# copy relevant files
copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) )
# create emtpy sample processor
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f:
f.write('''{}''' )
A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
with self.assertRaises(SCREAMING_SNAKE_CASE ):
A : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(SCREAMING_SNAKE_CASE ):
A : Union[str, Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE )
A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
A : List[str] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
A : Tuple = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
A : List[str] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE )
A : int = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
try:
AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE )
AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE ):
AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
A : List[Any] = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
A : Tuple = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' )
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A : Optional[int] = CustomTokenizer(SCREAMING_SNAKE_CASE )
A : Any = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(SCREAMING_SNAKE_CASE )
A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
class A ( __snake_case ):
__magic_name__ = False
class A ( __snake_case ):
__magic_name__ = False
class A ( __snake_case ):
__magic_name__ = '''AutoFeatureExtractor'''
__magic_name__ = '''AutoTokenizer'''
__magic_name__ = False
try:
AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE )
AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local classes.
A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
A : Optional[int] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
A : Tuple = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : int = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Optional[int] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class A ( unittest.TestCase ):
__magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def __lowerCAmelCase ( cls ) -> Dict:
"""simple docstring"""
A : Optional[int] = TOKEN
HfFolder.save_token(SCREAMING_SNAKE_CASE )
@classmethod
def __lowerCAmelCase ( cls ) -> Any:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Union[str, Any] = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(SCREAMING_SNAKE_CASE , '''test-processor''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token )
A : int = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Tuple = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(SCREAMING_SNAKE_CASE , '''test-processor-org''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token , organization='''valid_org''' , )
A : int = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
A : Any = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
A : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' )
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A : str = CustomTokenizer(SCREAMING_SNAKE_CASE )
A : Union[str, Any] = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'{USER}/test-dynamic-processor' , token=self._token )
A : List[str] = Repository(SCREAMING_SNAKE_CASE , clone_from=F'{USER}/test-dynamic-processor' , token=self._token )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) ) as f:
A : Dict = json.load(SCREAMING_SNAKE_CASE )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_processing.py''' ) ) )
repo.push_to_hub()
A : Optional[int] = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' , trust_remote_code=SCREAMING_SNAKE_CASE )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
| 3 | 0 |
from math import factorial
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] = 20 ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
SCREAMING_SNAKE_CASE__ = n // 2
return int(factorial(snake_case__ ) / (factorial(snake_case__ ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
__lowerCamelCase : List[str] = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number.''')
| 219 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
lowercase : Optional[Any] = None
lowercase : Tuple = logging.get_logger(__name__)
lowercase : Dict = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
lowercase : Tuple = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
'tokenizer_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
},
}
lowercase : List[str] = {
'google/rembert': 2_56,
}
lowercase : Dict = '▁'
class A ( __snake_case ):
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = RemBertTokenizer
def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[MASK]" , **SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
A : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
A : List[Any] = do_lower_case
A : str = remove_space
A : int = keep_accents
A : Union[str, Any] = vocab_file
A : List[Any] = False if not self.vocab_file else True
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]:
"""simple docstring"""
A : List[Any] = [self.sep_token_id]
A : Tuple = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]:
"""simple docstring"""
A : Tuple = [self.sep_token_id]
A : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE ) )
return
A : Any = os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 3 | 0 |
def lowerCamelCase__ ( __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_validate_point(snake_case__ )
_validate_point(snake_case__ )
if len(snake_case__ ) != len(snake_case__ ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(snake_case__ , snake_case__ ) ) )
def lowerCamelCase__ ( __lowerCAmelCase : List[Any] ):
"""simple docstring"""
if point:
if isinstance(snake_case__ , snake_case__ ):
for item in point:
if not isinstance(snake_case__ , (int, float) ):
lowerCAmelCase_ = (
'''Expected a list of numbers as input, found '''
F"""{type(snake_case__ ).__name__}"""
)
raise TypeError(snake_case__ )
else:
lowerCAmelCase_ = F"""Expected a list of numbers as input, found {type(snake_case__ ).__name__}"""
raise TypeError(snake_case__ )
else:
raise ValueError("Missing an input" )
def lowerCamelCase__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int ):
"""simple docstring"""
_validate_point(snake_case__ )
_validate_point(snake_case__ )
if len(snake_case__ ) != len(snake_case__ ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(snake_case__ , snake_case__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 231 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase : Optional[Any] = logging.get_logger(__name__)
class A ( __snake_case ):
__magic_name__ = ['''pixel_values''']
def __init__( self , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 1 / 255 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
A : str = size if size is not None else {'''shortest_edge''': 384}
A : Tuple = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
A : str = do_resize
A : List[Any] = size
# Default value set here for backwards compatibility where the value in config is None
A : List[Any] = crop_pct if crop_pct is not None else 224 / 256
A : Optional[int] = resample
A : Union[str, Any] = do_rescale
A : List[str] = rescale_factor
A : Union[str, Any] = do_normalize
A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
A : str = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
A : Any = size['''shortest_edge''']
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
A : Dict = int(shortest_edge / crop_pct )
A : str = get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
A : int = resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE , ) -> PIL.Image.Image:
"""simple docstring"""
A : int = do_resize if do_resize is not None else self.do_resize
A : Tuple = crop_pct if crop_pct is not None else self.crop_pct
A : Optional[Any] = resample if resample is not None else self.resample
A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
A : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
A : List[str] = image_std if image_std is not None else self.image_std
A : Union[str, Any] = size if size is not None else self.size
A : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
A : Any = make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError('''crop_pct must be specified if size < 384.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
A : Optional[int] = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
A : Any = [self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , crop_pct=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
A : str = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
A : Dict = [self.normalize(image=SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE ) for image in images]
A : Any = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
A : Optional[int] = {'''pixel_values''': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
| 3 | 0 |
'''simple docstring'''
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
lowerCamelCase : Optional[int] = 'Usage of script: script_name <size_of_canvas:int>'
lowerCamelCase : Any = [0] * 100 + [1] * 10
random.shuffle(choice)
def _SCREAMING_SNAKE_CASE (A ) -> Any:
"""simple docstring"""
lowercase__ = [[False for i in range(snake_case__ )] for j in range(snake_case__ )]
return canvas
def _SCREAMING_SNAKE_CASE (A ) -> Optional[Any]:
"""simple docstring"""
for i, row in enumerate(snake_case__ ):
for j, _ in enumerate(snake_case__ ):
lowercase__ = bool(random.getrandbits(1 ) )
def _SCREAMING_SNAKE_CASE (A ) -> List[str]:
"""simple docstring"""
lowercase__ = np.array(snake_case__ )
lowercase__ = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(snake_case__ ):
for c, pt in enumerate(snake_case__ ):
lowercase__ = __judge_point(
snake_case__ , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
lowercase__ = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
lowercase__ = current_canvas.tolist()
return return_canvas
def _SCREAMING_SNAKE_CASE (A , A ) -> int:
"""simple docstring"""
lowercase__ = 0
lowercase__ = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
lowercase__ = pt
if pt:
if alive < 2:
lowercase__ = False
elif alive == 2 or alive == 3:
lowercase__ = True
elif alive > 3:
lowercase__ = False
else:
if alive == 3:
lowercase__ = True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
lowerCamelCase : Optional[int] = int(sys.argv[1])
# main working structure of this module.
lowerCamelCase : Optional[int] = create_canvas(canvas_size)
seed(c)
lowerCamelCase : str = plt.subplots()
fig.show()
lowerCamelCase : Any = ListedColormap(['w', 'k'])
try:
while True:
lowerCamelCase : List[str] = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 2 |
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(SCREAMING_SNAKE_CASE ):
A : int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : List[str] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@slow
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(SCREAMING_SNAKE_CASE ):
A : Any = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Any = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@slow
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
for model_name in ["bert-base-cased", "bert-large-uncased"]:
A : Optional[int] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
A : List[str] = FlaxBertModel.from_pretrained(SCREAMING_SNAKE_CASE )
A : Optional[Any] = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**SCREAMING_SNAKE_CASE ):
return model(**SCREAMING_SNAKE_CASE )
eval(**SCREAMING_SNAKE_CASE ).block_until_ready()
@slow
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
for model_name in ["roberta-base", "roberta-large"]:
A : List[str] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
A : Union[str, Any] = FlaxRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE )
A : int = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**SCREAMING_SNAKE_CASE ):
return model(**SCREAMING_SNAKE_CASE )
eval(**SCREAMING_SNAKE_CASE ).block_until_ready()
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE , '''bert-base is not a local folder and is not a valid model identifier''' ):
A : List[Any] = FlaxAutoModel.from_pretrained('''bert-base''' )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
A : Optional[int] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE , revision='''aaaaaa''' )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ):
A : List[str] = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE , '''Use `from_pt=True` to load this model''' ):
A : Any = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
| 3 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Any = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {
'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class lowercase_ ( __snake_case ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = """markuplm"""
def __init__( self , __SCREAMING_SNAKE_CASE=30522 , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.0_2 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=256 , __SCREAMING_SNAKE_CASE=1024 , __SCREAMING_SNAKE_CASE=216 , __SCREAMING_SNAKE_CASE=1001 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=50 , __SCREAMING_SNAKE_CASE="absolute" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) ->Dict:
super().__init__(
pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = position_embedding_type
lowerCAmelCase = use_cache
lowerCAmelCase = classifier_dropout
# additional properties
lowerCAmelCase = max_depth
lowerCAmelCase = max_xpath_tag_unit_embeddings
lowerCAmelCase = max_xpath_subs_unit_embeddings
lowerCAmelCase = tag_pad_id
lowerCAmelCase = subs_pad_id
lowerCAmelCase = xpath_unit_hidden_size
| 338 |
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowercase : Union[str, Any] = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
lowercase : Tuple = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def lowerCAmelCase_ ( snake_case__ , snake_case__=False ):
'''simple docstring'''
A, A : Tuple = create_model(
'''HTSAT-tiny''' , '''roberta''' , snake_case__ , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=snake_case__ , fusion_type='''aff_2d''' if enable_fusion else None , )
return model, model_cfg
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : Dict = {}
A : str = R'''.*sequential.(\d+).*'''
A : Union[str, Any] = R'''.*_projection.(\d+).*'''
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
A : Any = key.replace(snake_case__ , snake_case__ )
if re.match(snake_case__ , snake_case__ ):
# replace sequential layers with list
A : Any = re.match(snake_case__ , snake_case__ ).group(1 )
A : List[str] = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(snake_case__ )//3}.linear.' )
elif re.match(snake_case__ , snake_case__ ):
A : Union[str, Any] = int(re.match(snake_case__ , snake_case__ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
A : str = 1 if projecton_layer == 0 else 2
A : Optional[Any] = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' )
if "audio" and "qkv" in key:
# split qkv into query key and value
A : int = value
A : List[Any] = mixed_qkv.size(0 ) // 3
A : Union[str, Any] = mixed_qkv[:qkv_dim]
A : Optional[int] = mixed_qkv[qkv_dim : qkv_dim * 2]
A : Optional[int] = mixed_qkv[qkv_dim * 2 :]
A : Tuple = query_layer
A : Union[str, Any] = key_layer
A : Optional[int] = value_layer
else:
A : Dict = value
return model_state_dict
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ):
'''simple docstring'''
A, A : int = init_clap(snake_case__ , enable_fusion=snake_case__ )
clap_model.eval()
A : str = clap_model.state_dict()
A : Union[str, Any] = rename_state_dict(snake_case__ )
A : Tuple = ClapConfig()
A : str = enable_fusion
A : str = ClapModel(snake_case__ )
# ignore the spectrogram embedding layer
model.load_state_dict(snake_case__ , strict=snake_case__ )
model.save_pretrained(snake_case__ )
transformers_config.save_pretrained(snake_case__ )
if __name__ == "__main__":
lowercase : List[str] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
lowercase : Tuple = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 3 | 0 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__snake_case : str = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class A__(unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=7 , _lowercase=3 , _lowercase=18 , _lowercase=30 , _lowercase=400 , _lowercase=None , _lowercase=True , _lowercase=True , _lowercase=None , ) -> List[str]:
a_ : Any = size if size is not None else {'''height''': 20, '''width''': 20}
a_ : List[Any] = parent
a_ : Dict = batch_size
a_ : Optional[Any] = num_channels
a_ : str = image_size
a_ : List[Any] = min_resolution
a_ : Optional[int] = max_resolution
a_ : Union[str, Any] = size
a_ : Tuple = do_normalize
a_ : Tuple = do_convert_rgb
a_ : Union[str, Any] = [512, 1_024, 2_048, 4_096]
a_ : Optional[int] = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
def UpperCamelCase__ ( self ) -> str:
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def UpperCamelCase__ ( self ) -> int:
a_ : str = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
a_ : List[str] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ).convert("""RGB""" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11, reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''', )
@require_torch
@require_vision
class A__(__snake_case, unittest.TestCase ):
"""simple docstring"""
_A : List[str] = PixaStructImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ) -> Optional[Any]:
a_ : Optional[int] = PixaStructImageProcessingTester(self )
@property
def UpperCamelCase__ ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ) -> Tuple:
a_ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowercase , """do_convert_rgb""" ) )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
a_ : Dict = self.image_processor_tester.prepare_dummy_image()
a_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
a_ : int = 2_048
a_ : Tuple = image_processor(_lowercase , return_tensors="""pt""" , max_patches=_lowercase )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_6_0_6 ) , atol=1e-3 , rtol=1e-3 ) )
def UpperCamelCase__ ( self ) -> List[Any]:
a_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
a_ : str = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a_ : Optional[int] = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a_ : Union[str, Any] = image_processor(
_lowercase , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
a_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
a_ : List[Any] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
a_ : Optional[int] = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_lowercase ):
a_ : Any = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
a_ : Any = '''Hello'''
a_ : Any = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=_lowercase , header_text=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a_ : Optional[int] = image_processor(
_lowercase , return_tensors="""pt""" , max_patches=_lowercase , header_text=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCamelCase__ ( self ) -> Dict:
a_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
a_ : Tuple = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a_ : List[Any] = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a_ : str = image_processor(
_lowercase , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCamelCase__ ( self ) -> List[str]:
a_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
a_ : int = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a_ : Optional[Any] = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a_ : Dict = image_processor(
_lowercase , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11, reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''', )
@require_torch
@require_vision
class A__(__snake_case, unittest.TestCase ):
"""simple docstring"""
_A : Any = PixaStructImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ) -> str:
a_ : Union[str, Any] = PixaStructImageProcessingTester(self , num_channels=4 )
a_ : Optional[Any] = 3
@property
def UpperCamelCase__ ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
a_ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowercase , """do_convert_rgb""" ) )
def UpperCamelCase__ ( self ) -> Optional[Any]:
a_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
a_ : int = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a_ : Any = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a_ : int = image_processor(
_lowercase , return_tensors="""pt""" , max_patches=_lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 248 |
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
lowercase : Dict = logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
A : Union[str, Any] = os.path.abspath(snake_case__ )
logger.info(F'Loading PyTorch weights from {pt_path}' )
A : Any = torch.load(snake_case__ , map_location='''cpu''' )
logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
A : List[str] = convert_pytorch_state_dict_to_flax(snake_case__ , snake_case__ )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
A : Any = convert_pytorch_sharded_state_dict_to_flax(snake_case__ , snake_case__ )
return flax_state_dict
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(snake_case__ ) -> bool:
return len(set(snake_case__ ) & {key, (model_prefix,) + key} ) > 0
# layer norm
A : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(snake_case__ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
A : Tuple = pt_tuple_key[:-1] + ('''mean''',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(snake_case__ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
A : Dict = pt_tuple_key[:-1] + ('''var''',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(snake_case__ ):
return renamed_pt_tuple_key, pt_tensor
# embedding
A : Any = pt_tuple_key[:-1] + ('''embedding''',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(snake_case__ ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
A : Optional[Any] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(snake_case__ ):
A : List[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
A : Optional[int] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(snake_case__ ):
A : str = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
A : Dict = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
A : List[Any] = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
A : Dict = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
A : List[Any] = pt_tuple_key[-2] + '''_g'''
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
A : List[str] = pt_tuple_key[-2] + '''_v'''
if name is not None:
A : int = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
A : Dict = {k: v.numpy() for k, v in pt_state_dict.items()}
A : int = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
A : List[str] = flax_model.params['''params''']
else:
A : Dict = flax_model.params
A : List[Any] = flatten_dict(snake_case__ )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
A : List[str] = flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(snake_case__ )
A : int = {}
A : List[str] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
A : int = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A : str = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
A : Union[str, Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
A : Any = pt_tuple_key[1:]
# Correctly rename weight parameters
A, A : Dict = rename_key_and_reshape_tensor(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# add model prefix if necessary
A : Any = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
A : int = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
A : Tuple = jnp.asarray(snake_case__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(snake_case__ , snake_case__ )
continue
# also add unexpected weight so that warning is thrown
A : List[str] = jnp.asarray(snake_case__ )
else:
# also add unexpected weight so that warning is thrown
A : Optional[Any] = jnp.asarray(snake_case__ )
return unflatten_dict(snake_case__ )
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
import torch
# Load the index
A : Union[str, Any] = {}
for shard_file in shard_filenames:
# load using msgpack utils
A : List[str] = torch.load(snake_case__ )
A : int = {k: v.numpy() for k, v in pt_state_dict.items()}
A : Tuple = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
A : Optional[int] = flax_model.params['''params''']
A : List[Any] = flatten_dict(snake_case__ )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
A : Dict = flax_model.params
A : Tuple = flatten_dict(snake_case__ )
A : List[str] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
A : List[str] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A : int = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
A : List[str] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
A : Union[str, Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
A, A : Any = rename_key_and_reshape_tensor(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# add model prefix if necessary
A : int = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
A : int = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
A : Optional[int] = jnp.asarray(snake_case__ )
continue
if "var" in flax_key[-1]:
A : Optional[int] = jnp.asarray(snake_case__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(snake_case__ , snake_case__ )
continue
# also add unexpected weight so that warning is thrown
A : Optional[Any] = jnp.asarray(snake_case__ )
else:
# also add unexpected weight so that warning is thrown
A : Optional[Any] = jnp.asarray(snake_case__ )
return unflatten_dict(snake_case__ )
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
A : Dict = os.path.abspath(snake_case__ )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
A : List[str] = getattr(snake_case__ , '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(snake_case__ , '''rb''' ) as state_f:
try:
A : int = from_bytes(snake_case__ , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ )
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
A : List[str] = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values()
if any(snake_case__ ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
A : Optional[Any] = jax.tree_util.tree_map(
lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ )
A : Union[str, Any] = flatten_dict(snake_case__ )
A : List[Any] = pt_model.state_dict()
A : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
A : Tuple = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
A : int = []
A : Any = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
A : Union[str, Any] = flax_key_tuple[0] == pt_model.base_model_prefix
A : int = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
A : List[str] = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
A : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case__ ) not in pt_model_dict:
# conv layer
A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',)
A : Optional[int] = jnp.transpose(snake_case__ , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ) not in pt_model_dict:
# linear layer
A : Tuple = flax_key_tuple[:-1] + ('''weight''',)
A : Tuple = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
A : Tuple = flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
A : Tuple = flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
A : List[Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
A : Union[str, Any] = '''.'''.join(snake_case__ )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
A : int = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
A : Optional[int] = key.split('''.''' )
A : Dict = None
if key_components[-3::2] == ["parametrizations", "original0"]:
A : List[str] = key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
A : List[Any] = key_components[-2] + '''_v'''
if name is not None:
A : str = key_components[:-3] + [name]
A : Optional[Any] = '''.'''.join(snake_case__ )
A : Optional[Any] = key
if flax_key in special_pt_names:
A : Optional[Any] = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
A : Dict = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor
A : Dict = torch.from_numpy(snake_case__ )
# remove from missing keys
missing_keys.remove(snake_case__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(snake_case__ )
pt_model.load_state_dict(snake_case__ )
# re-transform missing_keys to list
A : List[Any] = list(snake_case__ )
if len(snake_case__ ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(snake_case__ ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
''' use it for predictions and inference.''' )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
'''If your task is similar to the task the model of the checkpoint was trained on, '''
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
| 3 | 0 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowercase : int = logging.get_logger(__name__)
lowercase : List[str] = {
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json',
# See all BART models at https://huggingface.co/models?filter=bart
}
class __snake_case ( __snake_case ):
_a : List[str]= "bart"
_a : Optional[Any]= ["past_key_values"]
_a : Any= {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self ,snake_case=50265 ,snake_case=1024 ,snake_case=12 ,snake_case=4096 ,snake_case=16 ,snake_case=12 ,snake_case=4096 ,snake_case=16 ,snake_case=0.0 ,snake_case=0.0 ,snake_case="gelu" ,snake_case=1024 ,snake_case=0.1 ,snake_case=0.0 ,snake_case=0.0 ,snake_case=0.02 ,snake_case=0.0 ,snake_case=False ,snake_case=True ,snake_case=3 ,snake_case=1 ,snake_case=0 ,snake_case=2 ,snake_case=True ,snake_case=2 ,snake_case=2 ,**snake_case ,):
'''simple docstring'''
lowercase : Dict = vocab_size
lowercase : Any = max_position_embeddings
lowercase : Union[str, Any] = d_model
lowercase : str = encoder_ffn_dim
lowercase : Dict = encoder_layers
lowercase : str = encoder_attention_heads
lowercase : Union[str, Any] = decoder_ffn_dim
lowercase : Optional[Any] = decoder_layers
lowercase : List[Any] = decoder_attention_heads
lowercase : Optional[int] = dropout
lowercase : Optional[int] = attention_dropout
lowercase : Optional[int] = activation_dropout
lowercase : Dict = activation_function
lowercase : List[str] = init_std
lowercase : Any = encoder_layerdrop
lowercase : List[Any] = decoder_layerdrop
lowercase : List[str] = classifier_dropout
lowercase : Any = use_cache
lowercase : Optional[Any] = encoder_layers
lowercase : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=snake_case ,pad_token_id=snake_case ,bos_token_id=snake_case ,eos_token_id=snake_case ,is_encoder_decoder=snake_case ,decoder_start_token_id=snake_case ,forced_eos_token_id=snake_case ,**snake_case ,)
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" ,snake_case ):
lowercase : Union[str, Any] = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
"""The config can simply be saved and uploaded again to be fixed.""" )
class __snake_case ( __snake_case ):
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowercase : Dict = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
lowercase : Optional[int] = {0: '''batch'''}
lowercase : Optional[int] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
lowercase : str = {0: '''batch''', 1: '''decoder_sequence'''}
lowercase : Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(snake_case ,direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowercase : List[Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
lowercase : Tuple = self.num_layers
for i in range(snake_case ):
lowercase : Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
lowercase : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
lowercase : Tuple = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowercase : str = super().outputs
else:
lowercase : Tuple = super(snake_case ,self ).outputs
if self.use_past:
lowercase : Any = self.num_layers
for i in range(snake_case ):
lowercase : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
lowercase : List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = -1 ,snake_case = -1 ,snake_case = False ,snake_case = None ,):
'''simple docstring'''
lowercase : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case ,snake_case ,snake_case ,snake_case ,snake_case )
# Generate decoder inputs
lowercase : Union[str, Any] = seq_length if not self.use_past else 1
lowercase : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case ,snake_case ,snake_case ,snake_case ,snake_case )
lowercase : Union[str, Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
lowercase : Union[str, Any] = dict(**snake_case ,**snake_case )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowercase : Union[str, Any] = common_inputs['''input_ids'''].shape
lowercase : Optional[Any] = common_inputs['''decoder_input_ids'''].shape[1]
lowercase : str = self.num_attention_heads
lowercase : int = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase : Tuple = decoder_seq_length + 3
lowercase : List[Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowercase : Union[str, Any] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(snake_case ,snake_case )] ,dim=1 )
lowercase : Tuple = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowercase : Tuple = self.num_layers
lowercase : Union[str, Any] = min(snake_case ,snake_case )
lowercase : Optional[Any] = max(snake_case ,snake_case ) - min_num_layers
lowercase : Any = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(snake_case ):
common_inputs["past_key_values"].append(
(
torch.zeros(snake_case ),
torch.zeros(snake_case ),
torch.zeros(snake_case ),
torch.zeros(snake_case ),
) )
# TODO: test this.
lowercase : List[str] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(snake_case ,snake_case ):
common_inputs["past_key_values"].append((torch.zeros(snake_case ), torch.zeros(snake_case )) )
return common_inputs
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = -1 ,snake_case = -1 ,snake_case = False ,snake_case = None ,):
'''simple docstring'''
lowercase : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case ,snake_case ,snake_case ,snake_case ,snake_case )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowercase : Optional[int] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowercase : List[Any] = seqlen + 2
lowercase : List[Any] = self.num_layers
lowercase : Optional[Any] = self.num_attention_heads
lowercase : Union[str, Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase : Union[str, Any] = common_inputs['''attention_mask'''].dtype
lowercase : List[Any] = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(snake_case ,snake_case ,dtype=snake_case )] ,dim=1 )
lowercase : Tuple = [
(torch.zeros(snake_case ), torch.zeros(snake_case )) for _ in range(snake_case )
]
return common_inputs
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = -1 ,snake_case = -1 ,snake_case = False ,snake_case = None ,):
'''simple docstring'''
lowercase : Tuple = compute_effective_axis_dimension(
snake_case ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase : int = tokenizer.num_special_tokens_to_add(snake_case )
lowercase : int = compute_effective_axis_dimension(
snake_case ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=snake_case )
# Generate dummy inputs according to compute batch and sequence
lowercase : Tuple = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowercase : List[Any] = dict(tokenizer(snake_case ,return_tensors=snake_case ) )
return common_inputs
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = -1 ,snake_case = -1 ,snake_case = False ,snake_case = None ,):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowercase : Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
snake_case ,batch_size=snake_case ,seq_length=snake_case ,is_pair=snake_case ,framework=snake_case )
elif self.task == "causal-lm":
lowercase : int = self._generate_dummy_inputs_for_causal_lm(
snake_case ,batch_size=snake_case ,seq_length=snake_case ,is_pair=snake_case ,framework=snake_case )
else:
lowercase : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
snake_case ,batch_size=snake_case ,seq_length=snake_case ,is_pair=snake_case ,framework=snake_case )
return common_inputs
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowercase : List[str] = super()._flatten_past_key_values_(snake_case ,snake_case ,snake_case ,snake_case )
else:
lowercase : Optional[Any] = super(snake_case ,self )._flatten_past_key_values_(
snake_case ,snake_case ,snake_case ,snake_case )
| 20 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
lowercase : Optional[int] = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
lowercase : Optional[Any] = {
'169M': 7_68,
'430M': 10_24,
'1B5': 20_48,
'3B': 25_60,
'7B': 40_96,
'14B': 51_20,
}
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : Optional[Any] = list(state_dict.keys() )
for name in state_dict_keys:
A : str = state_dict.pop(snake_case__ )
# emb -> embedding
if name.startswith('''emb.''' ):
A : Optional[Any] = name.replace('''emb.''' , '''embeddings.''' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('''blocks.0.ln0''' ):
A : Union[str, Any] = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' )
# att -> attention
A : int = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , snake_case__ )
# ffn -> feed_forward
A : List[Any] = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , snake_case__ )
# time_mix_k -> time_mix_key and reshape
if name.endswith('''.time_mix_k''' ):
A : List[str] = name.replace('''.time_mix_k''' , '''.time_mix_key''' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('''.time_mix_v''' ):
A : Union[str, Any] = name.replace('''.time_mix_v''' , '''.time_mix_value''' )
# time_mix_r -> time_mix_key and reshape
if name.endswith('''.time_mix_r''' ):
A : Union[str, Any] = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' )
if name != "head.weight":
A : List[Any] = '''rwkv.''' + name
A : Dict = weight
return state_dict
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=False , snake_case__=None ):
'''simple docstring'''
if tokenizer_file is None:
print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' )
A : int = 5_0277
A : Optional[int] = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' )
else:
A : str = PreTrainedTokenizerFast(tokenizer_file=snake_case__ )
A : Any = len(snake_case__ )
tokenizer.save_pretrained(snake_case__ )
# 2. Build the config
A : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
A : List[str] = candidate
break
if size is None:
raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' )
if size not in possible_sizes:
raise ValueError(F'`size` should be one of {possible_sizes}, got {size}.' )
A : Any = RwkvConfig(
vocab_size=snake_case__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(snake_case__ )
# 3. Download model file then convert state_dict
A : Union[str, Any] = hf_hub_download(snake_case__ , snake_case__ )
A : Tuple = torch.load(snake_case__ , map_location='''cpu''' )
A : List[Any] = convert_state_dict(snake_case__ )
# 4. Split in shards and save
A, A : List[str] = shard_checkpoint(snake_case__ )
for shard_file, shard in shards.items():
torch.save(snake_case__ , os.path.join(snake_case__ , snake_case__ ) )
if index is not None:
A : Dict = os.path.join(snake_case__ , snake_case__ )
# Save the index as well
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
A : List[Any] = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '''\n'''
f.write(snake_case__ )
# 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
print(
'''Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.''' )
A : List[Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
A : Union[str, Any] = torch.load(os.path.join(snake_case__ , snake_case__ ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(snake_case__ , snake_case__ ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' )
A : int = AutoModelForCausalLM.from_pretrained(snake_case__ )
model.push_to_hub(snake_case__ , max_shard_size='''2GB''' )
tokenizer.push_to_hub(snake_case__ )
if __name__ == "__main__":
lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
lowercase : Union[str, Any] = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 3 | 0 |
import math
def SCREAMING_SNAKE_CASE__ ( __a , __a = 0 , __a = 0 ):
snake_case_ : List[Any] = end or len(snake_case__ )
for i in range(snake_case__ , snake_case__ ):
snake_case_ : Union[str, Any] = i
snake_case_ : Optional[int] = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
snake_case_ : Optional[Any] = array[temp_index - 1]
temp_index -= 1
snake_case_ : str = temp_index_value
return array
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a ): # Max Heap
snake_case_ : Optional[int] = index
snake_case_ : List[str] = 2 * index + 1 # Left Node
snake_case_ : Optional[int] = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
snake_case_ : List[Any] = left_index
if right_index < heap_size and array[largest] < array[right_index]:
snake_case_ : Dict = right_index
if largest != index:
snake_case_ : List[str] = array[largest], array[index]
heapify(snake_case__ , snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( __a ):
snake_case_ : int = len(snake_case__ )
for i in range(n // 2 , -1 , -1 ):
heapify(snake_case__ , snake_case__ , snake_case__ )
for i in range(n - 1 , 0 , -1 ):
snake_case_ : Tuple = array[0], array[i]
heapify(snake_case__ , 0 , snake_case__ )
return array
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a ):
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a ):
snake_case_ : Optional[Any] = low
snake_case_ : str = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
snake_case_ : int = array[j], array[i]
i += 1
def SCREAMING_SNAKE_CASE__ ( __a ):
if len(snake_case__ ) == 0:
return array
snake_case_ : Any = 2 * math.ceil(math.loga(len(snake_case__ ) ) )
snake_case_ : Tuple = 16
return intro_sort(snake_case__ , 0 , len(snake_case__ ) , snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a ):
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(snake_case__ )
max_depth -= 1
snake_case_ : Dict = median_of_a(snake_case__ , snake_case__ , start + ((end - start) // 2) + 1 , end - 1 )
snake_case_ : Optional[Any] = partition(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
intro_sort(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
snake_case_ : Tuple = p
return insertion_sort(snake_case__ , snake_case__ , snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE = input("""Enter numbers separated by a comma : """).strip()
_SCREAMING_SNAKE_CASE = [float(item) for item in user_input.split(""",""")]
print(sort(unsorted))
| 327 |
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
lowercase : str = logging.get_logger(__name__)
@add_end_docstrings(__snake_case )
class A ( __snake_case ):
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
self.check_model_type(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A, A : Dict = {}, {}
if padding is not None:
A : List[str] = padding
if truncation is not None:
A : Dict = truncation
if top_k is not None:
A : Optional[Any] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE , (Image.Image, str) ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A : int = {'''image''': image, '''question''': question}
else:
A : Any = image
A : Any = super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
return results
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> Any:
"""simple docstring"""
A : Union[str, Any] = load_image(inputs['''image'''] )
A : Optional[Any] = self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE )
A : Dict = self.image_processor(images=SCREAMING_SNAKE_CASE , return_tensors=self.framework )
model_inputs.update(SCREAMING_SNAKE_CASE )
return model_inputs
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
A : List[Any] = self.model(**SCREAMING_SNAKE_CASE )
return model_outputs
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=5 ) -> int:
"""simple docstring"""
if top_k > self.model.config.num_labels:
A : Dict = self.model.config.num_labels
if self.framework == "pt":
A : Optional[int] = model_outputs.logits.sigmoid()[0]
A, A : int = probs.topk(SCREAMING_SNAKE_CASE )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
A : int = scores.tolist()
A : List[str] = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )]
| 3 | 0 |
"""simple docstring"""
import os
def lowerCAmelCase__ ( UpperCamelCase__ = "matrix.txt" ):
'''simple docstring'''
with open(os.path.join(os.path.dirname(snake_case__ ) , snake_case__ ) ) as in_file:
_a : Dict = in_file.read()
_a : Union[str, Any] = [[int(snake_case__ ) for cell in row.split(""",""" )] for row in data.strip().splitlines()]
_a : Union[str, Any] = [[0 for cell in row] for row in grid]
_a : Any = len(grid[0] )
_a : int = [[0 for i in range(snake_case__ )] for j in range(snake_case__ )]
_a : List[str] = grid[0][0]
for i in range(1 , snake_case__ ):
_a : Any = grid[0][i] + dp[0][i - 1]
for i in range(1 , snake_case__ ):
_a : Dict = grid[i][0] + dp[i - 1][0]
for i in range(1 , snake_case__ ):
for j in range(1 , snake_case__ ):
_a : Union[str, Any] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 294 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase : Union[str, Any] = logging.get_logger(__name__)
lowercase : str = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class A ( __snake_case ):
__magic_name__ = '''bert'''
def __init__( self , SCREAMING_SNAKE_CASE=30522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
A : Optional[int] = vocab_size
A : Optional[Any] = hidden_size
A : List[Any] = num_hidden_layers
A : List[str] = num_attention_heads
A : Dict = hidden_act
A : Optional[Any] = intermediate_size
A : List[Any] = hidden_dropout_prob
A : List[Any] = attention_probs_dropout_prob
A : Optional[Any] = max_position_embeddings
A : List[str] = type_vocab_size
A : Dict = initializer_range
A : str = layer_norm_eps
A : int = position_embedding_type
A : Dict = use_cache
A : str = classifier_dropout
class A ( __snake_case ):
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
A : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
A : Optional[int] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 3 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class __A ( __snake_case ):
_UpperCamelCase : Any = 42
_UpperCamelCase : List[str] = 42
_UpperCamelCase : List[str] = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 44 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
A : str = BeautifulSoup(requests.get(snake_case__ , params=snake_case__ ).content , '''html.parser''' )
A : Dict = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
A : Optional[int] = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
return anchors[2].get_text()
if __name__ == "__main__":
lowercase : str = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 20_18,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 3 | 0 |
def A ( _lowercase = 100 ):
SCREAMING_SNAKE_CASE : List[str] = 0
SCREAMING_SNAKE_CASE : List[str] = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 182 |
'''simple docstring'''
class A :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A : Any = None
A : Optional[Any] = None
A : Tuple = graph
self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Dict = len(SCREAMING_SNAKE_CASE )
A : Optional[Any] = None
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
if sources is int:
A : Dict = [sources]
if sinks is int:
A : str = [sinks]
if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0:
return
A : Optional[int] = sources[0]
A : Union[str, Any] = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1:
A : Optional[int] = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
A : Dict = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
A : Dict = max_input_flow
A : Tuple = 0
A : Tuple = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
A : Optional[Any] = max_input_flow
A : Optional[Any] = size - 1
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
if self.maximum_flow_algorithm is None:
raise Exception('''You need to set maximum flow algorithm before.''' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
A : List[Any] = algorithm(self )
class A :
def __init__( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
A : Union[str, Any] = flow_network
A : Optional[Any] = flow_network.verticesCount
A : Tuple = flow_network.sourceIndex
A : Dict = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
A : str = flow_network.graph
A : Optional[Any] = False
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
if not self.executed:
self._algorithm()
A : Optional[int] = True
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
pass
class A ( __snake_case ):
def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
super().__init__(SCREAMING_SNAKE_CASE )
# use this to save your result
A : List[str] = -1
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
if not self.executed:
raise Exception('''You should execute algorithm before using its result!''' )
return self.maximum_flow
class A ( __snake_case ):
def __init__( self , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
super().__init__(SCREAMING_SNAKE_CASE )
A : Optional[Any] = [[0] * self.verticies_count for i in range(self.verticies_count )]
A : Union[str, Any] = [0] * self.verticies_count
A : List[Any] = [0] * self.verticies_count
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Tuple = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
A : Optional[Any] = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
A : Union[str, Any] = 0
while i < len(SCREAMING_SNAKE_CASE ):
A : str = vertices_list[i]
A : List[str] = self.heights[vertex_index]
self.process_vertex(SCREAMING_SNAKE_CASE )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) )
A : int = 0
else:
i += 1
A : Optional[Any] = sum(self.preflow[self.source_index] )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.relabel(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
A : Dict = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
A : Dict = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
A : Dict = self.heights[to_index]
if min_height is not None:
A : Dict = min_height + 1
if __name__ == "__main__":
lowercase : Optional[int] = [0]
lowercase : List[Any] = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
lowercase : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
lowercase : List[str] = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
lowercase : List[str] = flow_network.find_maximum_flow()
print(f'''maximum flow is {maximum_flow}''')
| 3 | 0 |
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''' , [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : Union[str, Any] = tmp_path_factory.mktemp('''dset_infos_dir''' )
if "full:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''---\ndataset_info:\n dataset_size: 42\n---''' )
if "empty:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''''' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / '''dataset_infos.json''' , '''w''' ) as f:
f.write('''{"default": {"dataset_size": 42}}''' )
lowercase_ : Optional[int] = DatasetInfosDict.from_directory(snake_case__ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'''dataset_info''' , [
DatasetInfo(),
DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
lowercase_ : Optional[Any] = str(snake_case__ )
dataset_info.write_to_directory(snake_case__ )
lowercase_ : Optional[Any] = DatasetInfo.from_directory(snake_case__ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(snake_case__ , '''dataset_info.json''' ) )
def snake_case_ ( ):
"""simple docstring"""
lowercase_ : str = DatasetInfo(
description='''foo''' , citation='''bar''' , homepage='''https://foo.bar''' , license='''CC0''' , features=Features({'''a''': Value('''int32''' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train''', '''num_examples''': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
lowercase_ : str = dataset_info._to_yaml_dict()
assert sorted(snake_case__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
lowercase_ : Optional[Any] = yaml.safe_dump(snake_case__ )
lowercase_ : Optional[int] = yaml.safe_load(snake_case__ )
assert dataset_info_yaml_dict == reloaded
def snake_case_ ( ):
"""simple docstring"""
lowercase_ : str = DatasetInfo()
lowercase_ : Tuple = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''' , [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=42 ),
'''v2''': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
lowercase_ : Tuple = str(snake_case__ )
dataset_infos_dict.write_to_directory(snake_case__ )
lowercase_ : Tuple = DatasetInfosDict.from_directory(snake_case__ )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
lowercase_ : Dict = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
lowercase_ : List[str] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(snake_case__ , '''README.md''' ) )
| 93 |
'''simple docstring'''
def lowerCAmelCase_ ( snake_case__ = 10 ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ) or n < 0:
raise ValueError('''Invalid input''' )
A : List[str] = 10**n
A : Tuple = 2_8433 * (pow(2 , 783_0457 , snake_case__ )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(10) = }''')
| 3 | 0 |
from typing import Any
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any] , ) -> Optional[int]:
"""simple docstring"""
_validation(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
# Creates data structures and fill initial step
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = {}
for state in states_space:
SCREAMING_SNAKE_CASE__ = observations_space[0]
SCREAMING_SNAKE_CASE__ = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
SCREAMING_SNAKE_CASE__ = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(snake_case__ ) ):
SCREAMING_SNAKE_CASE__ = observations_space[o]
SCREAMING_SNAKE_CASE__ = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
SCREAMING_SNAKE_CASE__ = ''''''
SCREAMING_SNAKE_CASE__ = -1
for k_state in states_space:
SCREAMING_SNAKE_CASE__ = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
SCREAMING_SNAKE_CASE__ = probability
SCREAMING_SNAKE_CASE__ = k_state
# Update probabilities and pointers dicts
SCREAMING_SNAKE_CASE__ = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
SCREAMING_SNAKE_CASE__ = arg_max
# The final observation
SCREAMING_SNAKE_CASE__ = observations_space[len(snake_case__ ) - 1]
# argmax for given final observation
SCREAMING_SNAKE_CASE__ = ''''''
SCREAMING_SNAKE_CASE__ = -1
for k_state in states_space:
SCREAMING_SNAKE_CASE__ = probabilities[(k_state, final_observation)]
if probability > max_probability:
SCREAMING_SNAKE_CASE__ = probability
SCREAMING_SNAKE_CASE__ = k_state
SCREAMING_SNAKE_CASE__ = arg_max
# Process pointers backwards
SCREAMING_SNAKE_CASE__ = last_state
SCREAMING_SNAKE_CASE__ = []
for o in range(len(snake_case__ ) - 1 , -1 , -1 ):
result.append(snake_case__ )
SCREAMING_SNAKE_CASE__ = pointers[previous, observations_space[o]]
result.reverse()
return result
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , ) -> int:
"""simple docstring"""
_validate_not_empty(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
_validate_lists(snake_case__ , snake_case__ )
_validate_dicts(
snake_case__ , snake_case__ , snake_case__ )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : int , __UpperCamelCase : List[str] , __UpperCamelCase : Any , __UpperCamelCase : int , ) -> str:
"""simple docstring"""
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("""There\'s an empty parameter""" )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : Optional[int] ) -> Dict:
"""simple docstring"""
_validate_list(snake_case__ , """observations_space""" )
_validate_list(snake_case__ , """states_space""" )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any ) -> Optional[int]:
"""simple docstring"""
if not isinstance(_object , snake_case__ ):
SCREAMING_SNAKE_CASE__ = f"""{var_name} must be a list"""
raise ValueError(snake_case__ )
else:
for x in _object:
if not isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE__ = f"""{var_name} must be a list of strings"""
raise ValueError(snake_case__ )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , ) -> int:
"""simple docstring"""
_validate_dict(snake_case__ , """initial_probabilities""" , snake_case__ )
_validate_nested_dict(snake_case__ , """transition_probabilities""" )
_validate_nested_dict(snake_case__ , """emission_probabilities""" )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] ) -> List[str]:
"""simple docstring"""
_validate_dict(_object , snake_case__ , snake_case__ )
for x in _object.values():
_validate_dict(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : Any = False ) -> List[str]:
"""simple docstring"""
if not isinstance(_object , snake_case__ ):
SCREAMING_SNAKE_CASE__ = f"""{var_name} must be a dict"""
raise ValueError(snake_case__ )
if not all(isinstance(snake_case__ , snake_case__ ) for x in _object ):
SCREAMING_SNAKE_CASE__ = f"""{var_name} all keys must be strings"""
raise ValueError(snake_case__ )
if not all(isinstance(snake_case__ , snake_case__ ) for x in _object.values() ):
SCREAMING_SNAKE_CASE__ = '''nested dictionary ''' if nested else ''''''
SCREAMING_SNAKE_CASE__ = f"""{var_name} {nested_text}all values must be {value_type.__name__}"""
raise ValueError(snake_case__ )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 219 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowercase : List[str] = logging.get_logger(__name__)
lowercase : str = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class A ( __snake_case ):
__magic_name__ = '''gpt_neo'''
__magic_name__ = ['''past_key_values''']
__magic_name__ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self , SCREAMING_SNAKE_CASE=50257 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=24 , SCREAMING_SNAKE_CASE=[[["global", "local"], 12]] , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=50256 , SCREAMING_SNAKE_CASE=50256 , **SCREAMING_SNAKE_CASE , ) -> int:
"""simple docstring"""
A : Union[str, Any] = vocab_size
A : Optional[Any] = max_position_embeddings
A : Dict = hidden_size
A : Optional[Any] = num_layers
A : Tuple = num_heads
A : int = intermediate_size
A : Optional[Any] = window_size
A : List[Any] = activation_function
A : Union[str, Any] = resid_dropout
A : Any = embed_dropout
A : List[Any] = attention_dropout
A : str = classifier_dropout
A : List[Any] = layer_norm_epsilon
A : str = initializer_range
A : List[str] = use_cache
A : Optional[int] = bos_token_id
A : List[Any] = eos_token_id
A : int = attention_types
A : int = self.expand_attention_types_params(SCREAMING_SNAKE_CASE )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.attention_layers)` == `config.num_layers` '''
F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, '
F'`config.num_layers = {self.num_layers}`. '
'''`config.attention_layers` is prepared using `config.attention_types`. '''
'''Please verify the value of `config.attention_types` argument.''' )
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@staticmethod
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
A : List[str] = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
import torch
A : Tuple = input.size()
A : Union[str, Any] = len(snake_case__ )
A : List[str] = shape[dimension]
A : Union[str, Any] = torch.arange(0 , snake_case__ , snake_case__ )
A : List[str] = torch.div(sizedim - size , snake_case__ , rounding_mode='''floor''' ) + 1
A : Optional[int] = torch.arange(snake_case__ ) + low_indices[:min_length][:, None]
A : str = [slice(snake_case__ )] * rank
A : List[Any] = indices
A : Union[str, Any] = input[s]
A : List[str] = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(snake_case__ )
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
import torch
A : List[str] = torch.arange(1 , snake_case__ )
A : Optional[int] = torch.remainder(snake_case__ , snake_case__ )
A : Optional[int] = remainders == 0
A : Optional[Any] = candidates[divisor_indices]
A : Optional[int] = torch.max(snake_case__ )
return largest_divisor, torch.div(snake_case__ , snake_case__ , rounding_mode='''floor''' )
class A ( __snake_case ):
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
A : Tuple = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE , direction='''inputs''' )
A : Optional[Any] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
A : Dict = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
return self._config.num_heads
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
"""simple docstring"""
A : List[str] = super(SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs(
SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE )
# We need to order the input in the way they appears in the forward()
A : Any = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
A, A : Dict = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
A : str = seqlen + 2
A : List[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
A : Any = [
(torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
A : str = common_inputs['''attention_mask''']
if self.use_past:
A : Optional[int] = ordered_inputs['''attention_mask'''].dtype
A : List[str] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )] , dim=1 )
return ordered_inputs
@property
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
return 13
| 3 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_A = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 231 |
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class A ( nn.Module ):
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 0.0
__magic_name__ = 1
__magic_name__ = 1
__magic_name__ = True
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = jnp.floataa
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Union[str, Any] = []
A : Union[str, Any] = []
for i in range(self.num_layers ):
A : Any = self.in_channels if i == 0 else self.out_channels
A : Optional[Any] = FlaxResnetBlockaD(
in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(SCREAMING_SNAKE_CASE )
A : Optional[int] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(SCREAMING_SNAKE_CASE )
A : Union[str, Any] = resnets
A : Union[str, Any] = attentions
if self.add_downsample:
A : int = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Union[str, Any]:
"""simple docstring"""
A : Optional[Any] = ()
for resnet, attn in zip(self.resnets , self.attentions ):
A : int = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE )
A : Dict = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
if self.add_downsample:
A : Optional[Any] = self.downsamplers_a(SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
return hidden_states, output_states
class A ( nn.Module ):
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 0.0
__magic_name__ = 1
__magic_name__ = True
__magic_name__ = jnp.floataa
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : Optional[Any] = []
for i in range(self.num_layers ):
A : Optional[Any] = self.in_channels if i == 0 else self.out_channels
A : List[str] = FlaxResnetBlockaD(
in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(SCREAMING_SNAKE_CASE )
A : Dict = resnets
if self.add_downsample:
A : Dict = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]:
"""simple docstring"""
A : str = ()
for resnet in self.resnets:
A : Optional[int] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
if self.add_downsample:
A : Optional[int] = self.downsamplers_a(SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
return hidden_states, output_states
class A ( nn.Module ):
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 0.0
__magic_name__ = 1
__magic_name__ = 1
__magic_name__ = True
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = jnp.floataa
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Optional[Any] = []
A : Optional[int] = []
for i in range(self.num_layers ):
A : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels
A : Dict = self.prev_output_channel if i == 0 else self.out_channels
A : List[str] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(SCREAMING_SNAKE_CASE )
A : int = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(SCREAMING_SNAKE_CASE )
A : Dict = resnets
A : Optional[Any] = attentions
if self.add_upsample:
A : Optional[int] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[int]:
"""simple docstring"""
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
A : List[str] = res_hidden_states_tuple[-1]
A : int = res_hidden_states_tuple[:-1]
A : List[str] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE )
A : Tuple = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE )
if self.add_upsample:
A : Dict = self.upsamplers_a(SCREAMING_SNAKE_CASE )
return hidden_states
class A ( nn.Module ):
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 0.0
__magic_name__ = 1
__magic_name__ = True
__magic_name__ = jnp.floataa
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : int = []
for i in range(self.num_layers ):
A : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
A : List[str] = self.prev_output_channel if i == 0 else self.out_channels
A : str = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(SCREAMING_SNAKE_CASE )
A : List[Any] = resnets
if self.add_upsample:
A : Optional[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Tuple:
"""simple docstring"""
for resnet in self.resnets:
# pop res hidden states
A : Optional[int] = res_hidden_states_tuple[-1]
A : Optional[Any] = res_hidden_states_tuple[:-1]
A : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
A : Optional[Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE )
if self.add_upsample:
A : List[str] = self.upsamplers_a(SCREAMING_SNAKE_CASE )
return hidden_states
class A ( nn.Module ):
__magic_name__ = 42
__magic_name__ = 0.0
__magic_name__ = 1
__magic_name__ = 1
__magic_name__ = False
__magic_name__ = False
__magic_name__ = jnp.floataa
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : str = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
A : List[Any] = []
for _ in range(self.num_layers ):
A : int = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(SCREAMING_SNAKE_CASE )
A : Union[str, Any] = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(SCREAMING_SNAKE_CASE )
A : List[str] = resnets
A : List[str] = attentions
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Dict:
"""simple docstring"""
A : Optional[Any] = self.resnets[0](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
A : Optional[int] = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE )
A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE )
return hidden_states
| 3 | 0 |
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def _SCREAMING_SNAKE_CASE (A , A , **A ) -> str:
"""simple docstring"""
lowercase__ = AutoConfig.from_pretrained(snake_case__ , **snake_case__ )
lowercase__ = AutoModelForSeqaSeqLM.from_config(snake_case__ )
model.save_pretrained(snake_case__ )
AutoTokenizer.from_pretrained(snake_case__ ).save_pretrained(snake_case__ )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 2 |
'''simple docstring'''
import os
def lowerCAmelCase_ ( ):
'''simple docstring'''
A : List[Any] = os.path.join(os.path.dirname(snake_case__ ) , '''num.txt''' )
with open(snake_case__ ) as file_hand:
return str(sum(int(snake_case__ ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 3 | 0 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_3_3_7 , num_examples=4_2 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_3_3_7 , num_examples=4_2 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]:
lowerCAmelCase = split_dict._to_yaml_list()
assert len(snake_case__ ) == len(snake_case__ )
lowerCAmelCase = SplitDict._from_yaml_list(snake_case__ )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
lowerCAmelCase = None
# the split name of split_dict takes over the name of the split info object
lowerCAmelCase = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'''split_info''' , [SplitInfo(), SplitInfo(dataset_name=snake_case__ ), SplitInfo(dataset_name='''my_dataset''' )] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Optional[int]:
lowerCAmelCase = asdict(SplitDict({'''train''': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 338 |
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] )
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , '''IN_MEMORY_MAX_SIZE''' , snake_case__ )
A : Dict = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
A : Dict = dataset_size < in_memory_max_size
else:
A : Tuple = False
A : int = is_small_dataset(snake_case__ )
assert result == expected
| 3 | 0 |
from __future__ import annotations
def _UpperCAmelCase ( a__ , a__):
'''simple docstring'''
if len(snake_case__) < k or k < 0:
raise ValueError("""Invalid Input""")
a_ : Any = sum(array[:k])
for i in range(len(snake_case__) - k):
a_ : Union[str, Any] = current_sum - array[i] + array[i + k]
a_ : List[Any] = max(snake_case__ , snake_case__)
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
__snake_case : int = [randint(-10_00, 10_00) for i in range(1_00)]
__snake_case : List[str] = randint(0, 1_10)
print(F"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
| 248 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class A ( __snake_case ):
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 3 | 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase : str = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,)
return model
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase : Any = VQModel(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=3 ,)
return model
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase : List[str] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Tuple = self.dummy_uncond_unet
lowercase : List[str] = DDIMScheduler()
lowercase : List[str] = self.dummy_vq_model
lowercase : str = LDMPipeline(unet=snake_case ,vqvae=snake_case ,scheduler=snake_case )
ldm.to(snake_case )
ldm.set_progress_bar_config(disable=snake_case )
lowercase : Any = torch.manual_seed(0 )
lowercase : int = ldm(generator=snake_case ,num_inference_steps=2 ,output_type="""numpy""" ).images
lowercase : str = torch.manual_seed(0 )
lowercase : Optional[Any] = ldm(generator=snake_case ,num_inference_steps=2 ,output_type="""numpy""" ,return_dict=snake_case )[0]
lowercase : List[str] = image[0, -3:, -3:, -1]
lowercase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase : Dict = np.array([0.8_512, 0.818, 0.6_411, 0.6_808, 0.4_465, 0.5_618, 0.46, 0.6_231, 0.5_172] )
lowercase : List[Any] = 1e-2 if torch_device != '''mps''' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class __snake_case ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(snake_case )
ldm.set_progress_bar_config(disable=snake_case )
lowercase : Union[str, Any] = torch.manual_seed(0 )
lowercase : str = ldm(generator=snake_case ,num_inference_steps=5 ,output_type="""numpy""" ).images
lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowercase : List[Any] = np.array([0.4_399, 0.44_975, 0.46_825, 0.474, 0.4_359, 0.4_581, 0.45_095, 0.4_341, 0.4_447] )
lowercase : Tuple = 1e-2 if torch_device != '''mps''' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 20 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
lowercase : Optional[int] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
lowercase : Optional[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
lowercase : str = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]:
"""simple docstring"""
if return_pvalue:
A : Union[str, Any] = pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )}
| 3 | 0 |
def SCREAMING_SNAKE_CASE__ ( __a ):
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 327 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowercase : Dict = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[Any] = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[str] = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Dict = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Any = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowercase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 3 | 0 |
"""simple docstring"""
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
_snake_case = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F'''{bindir}/../../examples/pytorch/translation'''):
from run_translation import main # noqa
set_seed(42)
_snake_case = 'sshleifer/student_marian_en_ro_6_1'
_snake_case = 'sshleifer/tiny-mbart'
@require_torch
class UpperCamelCase ( __snake_case ):
def _lowercase ( self : int , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Optional[int]=True , ) -> str:
_a : str = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=UpperCAmelCase__ , num_train_epochs=1 , distributed=UpperCAmelCase__ , extra_args_str=UpperCAmelCase__ , predict_with_generate=UpperCAmelCase__ , do_train=UpperCAmelCase__ , do_eval=UpperCAmelCase__ , do_predict=UpperCAmelCase__ , )
_a : List[Any] = TrainerState.load_from_json(os.path.join(UpperCAmelCase__ , """trainer_state.json""" ) ).log_history
if not do_eval:
return
_a : Dict = [log for log in logs if '''eval_loss''' in log.keys()]
_a : List[str] = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
_a : Tuple = eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , UpperCAmelCase__ )
assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def _lowercase ( self : Tuple ) -> Union[str, Any]:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def _lowercase ( self : List[Any] ) -> List[str]:
self.run_seqaseq_quick(distributed=UpperCAmelCase__ )
@require_torch_multi_gpu
def _lowercase ( self : int ) -> List[Any]:
self.run_seqaseq_quick(distributed=UpperCAmelCase__ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def _lowercase ( self : Any ) -> Optional[Any]:
self.run_seqaseq_quick(distributed=UpperCAmelCase__ , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def _lowercase ( self : str ) -> Union[str, Any]:
self.run_seqaseq_quick(distributed=UpperCAmelCase__ , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def _lowercase ( self : int ) -> int:
self.run_seqaseq_quick(distributed=UpperCAmelCase__ , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=UpperCAmelCase__ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def _lowercase ( self : List[str] ) -> Union[str, Any]:
self.run_seqaseq_quick(
distributed=UpperCAmelCase__ , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=UpperCAmelCase__ )
@require_apex
@require_torch_gpu
def _lowercase ( self : Optional[Any] ) -> int:
self.run_seqaseq_quick(distributed=UpperCAmelCase__ , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=UpperCAmelCase__ , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : Optional[int] ) -> str:
_a : Any = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
_a : Any = experiments[experiment_id]
_a : Any = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
_a : Union[str, Any] = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**UpperCAmelCase__ , extra_args_str=data["""extra_args_str"""] )
_a : int = len(re.findall(UpperCAmelCase__ , cl.err ) )
self.assertEqual(UpperCAmelCase__ , data["""n_matches"""] )
@slow
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
_a : Tuple = self.run_trainer(
eval_steps=2 , max_len=128 , model_name=UpperCAmelCase__ , learning_rate=3E-4 , num_train_epochs=10 , distributed=UpperCAmelCase__ , )
# Check metrics
_a : Union[str, Any] = TrainerState.load_from_json(os.path.join(UpperCAmelCase__ , """trainer_state.json""" ) ).log_history
_a : Union[str, Any] = [log for log in logs if '''eval_loss''' in log.keys()]
_a : List[str] = eval_metrics[0]
_a : List[Any] = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , UpperCAmelCase__ )
# test if do_predict saves generations and metrics
_a : int = os.listdir(UpperCAmelCase__ )
_a : Optional[int] = {os.path.basename(UpperCAmelCase__ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def _lowercase ( self : Any ) -> Union[str, Any]:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(UpperCAmelCase__ : Union[str, Any] ) -> Tuple[int, float]:
_a : Optional[int] = '''--skip_memory_metrics 0'''
_a : List[Any] = self.run_trainer(
max_len=128 , model_name=UpperCAmelCase__ , learning_rate=3E-4 , num_train_epochs=1 , optim=UpperCAmelCase__ , distributed=UpperCAmelCase__ , extra_args_str=UpperCAmelCase__ , do_eval=UpperCAmelCase__ , do_predict=UpperCAmelCase__ , n_gpus_to_use=1 , )
# Check metrics
_a : str = TrainerState.load_from_json(Path(UpperCAmelCase__ , """trainer_state.json""" ) ).log_history
_a : Union[str, Any] = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
_a : int = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
_a : List[Any] = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
_a : List[Any] = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
_a : Optional[Any] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
_a : Dict = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
_a : Union[str, Any] = gpu_peak_mem_orig + gpu_alloc_mem_orig
_a : Optional[Any] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
_a : str = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
_a : List[str] = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
UpperCAmelCase__ , UpperCAmelCase__ , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
f""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
f""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , )
self.assertGreater(
UpperCAmelCase__ , UpperCAmelCase__ , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
f""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
f""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , )
self.assertEqual(
UpperCAmelCase__ , UpperCAmelCase__ , f"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] = 3E-3 , UpperCAmelCase__ : Optional[Any] = "adafactor" , UpperCAmelCase__ : Optional[Any] = False , UpperCAmelCase__ : str = None , UpperCAmelCase__ : Optional[Any] = 0 , UpperCAmelCase__ : str = True , UpperCAmelCase__ : Any = True , UpperCAmelCase__ : Union[str, Any] = True , UpperCAmelCase__ : Optional[int] = True , UpperCAmelCase__ : Any = None , ) -> Tuple:
_a : Tuple = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
_a : Dict = self.get_auto_remove_tmp_dir()
_a : int = f"""\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(UpperCAmelCase__ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(UpperCAmelCase__ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n """.split()
_a : Any = f"""\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(UpperCAmelCase__ )}\n """.split()
_a : Optional[Any] = '''
--do_predict
'''.split()
_a : Union[str, Any] = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"""--optim {optim}""".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
_a : Dict = get_gpu_count()
_a : Tuple = get_torch_dist_unique_port()
_a : str = f"""\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n """.split()
_a : str = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(UpperCAmelCase__ , env=self.get_env() )
else:
_a : List[str] = ['''run_translation.py'''] + args
with patch.object(UpperCAmelCase__ , """argv""" , UpperCAmelCase__ ):
main()
return output_dir
| 294 |
'''simple docstring'''
import os
import sys
import unittest
lowercase : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowercase : Any = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
lowercase : Optional[int] = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : Tuple = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE )
A : Any = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE )
A : List[Any] = {'''BertModelTest''': '''BertModelTester'''}
A : int = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE )
A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE )
A : List[str] = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
A : Union[str, Any] = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : int = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE )
A : Union[str, Any] = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE )
A : Dict = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
A : str = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
| 3 | 0 |
"""simple docstring"""
import argparse
_a : Dict = 'docs/source/_static/js/custom.js'
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> Dict:
with open(snake_case__ ,encoding="""utf-8""" ,newline="""\n""" ) as f:
_lowerCAmelCase : Dict = f.readlines()
_lowerCAmelCase : Dict = 0
# First let's put the right version
while not lines[index].startswith("""const stableVersion =""" ):
index += 1
_lowerCAmelCase : Union[str, Any] = f"const stableVersion = \"v{version}\"\n"
# Then update the dictionary
while not lines[index].startswith("""const versionMapping = {""" ):
index += 1
# We go until the end
while not lines[index].startswith("""}""" ):
index += 1
# We add the new version at the end
lines[index - 1] += f" \"v{version}\": \"v{version}\",\n"
with open(snake_case__ ,"""w""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
f.writelines(snake_case__ )
if __name__ == "__main__":
_a : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--version', help='Release version.')
_a : List[str] = parser.parse_args()
update_custom_js(args.version)
| 44 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class A ( __snake_case ):
__magic_name__ = DistilBertTokenizer
__magic_name__ = DistilBertTokenizerFast
__magic_name__ = True
@slow
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : List[Any] = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
A : Dict = tokenizer.encode('''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE )
A : List[str] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE )
A : Optional[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE )
A : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 3 | 0 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class lowercase__ :
def __init__( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Any=13 , UpperCamelCase__ : List[str]=7 , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : str=True , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Any=33 , UpperCamelCase__ : Union[str, Any]=32 , UpperCamelCase__ : int=5 , UpperCamelCase__ : Optional[int]=4 , UpperCamelCase__ : Optional[Any]=37 , UpperCamelCase__ : List[str]="gelu" , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : Union[str, Any]=3 , UpperCamelCase__ : Any=4 , UpperCamelCase__ : Optional[Any]=None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : List[Any] = seq_length
SCREAMING_SNAKE_CASE : Union[str, Any] = is_training
SCREAMING_SNAKE_CASE : Optional[Any] = use_input_mask
SCREAMING_SNAKE_CASE : List[str] = use_token_type_ids
SCREAMING_SNAKE_CASE : str = use_labels
SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE : Any = num_attention_heads
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Any = max_position_embeddings
SCREAMING_SNAKE_CASE : str = type_vocab_size
SCREAMING_SNAKE_CASE : Dict = type_sequence_label_size
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Dict = num_labels
SCREAMING_SNAKE_CASE : Optional[Any] = num_choices
SCREAMING_SNAKE_CASE : List[str] = scope
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Dict = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Dict = None
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : Dict = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self : str ):
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def __A ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = EsmModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = model(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __A ( self : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = EsmForMaskedLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = EsmForTokenClassification(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
(
SCREAMING_SNAKE_CASE
) : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowercase__ ( __snake_case , __snake_case , unittest.TestCase):
UpperCamelCase_ = False
UpperCamelCase_ = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase_ = ()
UpperCamelCase_ = (
{
"""feature-extraction""": EsmModel,
"""fill-mask""": EsmForMaskedLM,
"""text-classification""": EsmForSequenceClassification,
"""token-classification""": EsmForTokenClassification,
"""zero-shot""": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase_ = True
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = EsmModelTester(self )
SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def __A ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE : Optional[Any] = type
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )
@slow
def __A ( self : List[str] ):
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Any = EsmModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()[0]
SCREAMING_SNAKE_CASE : Optional[Any] = EsmEmbeddings(config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
SCREAMING_SNAKE_CASE : Any = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
SCREAMING_SNAKE_CASE : int = create_position_ids_from_input_ids(UpperCamelCase__ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCamelCase__ , UpperCamelCase__ ) ) )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()[0]
SCREAMING_SNAKE_CASE : int = EsmEmbeddings(config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.empty(2 , 4 , 30 )
SCREAMING_SNAKE_CASE : Any = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
SCREAMING_SNAKE_CASE : Any = torch.as_tensor([expected_single_positions, expected_single_positions] )
SCREAMING_SNAKE_CASE : int = embeddings.create_position_ids_from_inputs_embeds(UpperCamelCase__ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCamelCase__ , UpperCamelCase__ ) ) )
@unittest.skip('''Esm does not support embedding resizing''' )
def __A ( self : str ):
'''simple docstring'''
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def __A ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
pass
@require_torch
class lowercase__ ( __snake_case):
@slow
def __A ( self : Optional[Any] ):
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = EsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
model.eval()
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(UpperCamelCase__ )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = 33
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
@slow
def __A ( self : List[str] ):
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = EsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
model.eval()
SCREAMING_SNAKE_CASE : Any = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
SCREAMING_SNAKE_CASE : int = model(UpperCamelCase__ )[0]
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
| 182 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowercase : Optional[int] = logging.get_logger(__name__)
class A ( __snake_case ):
__magic_name__ = ['''input_features''', '''attention_mask''']
def __init__( self , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=16000 , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> int:
"""simple docstring"""
super().__init__(feature_size=SCREAMING_SNAKE_CASE , sampling_rate=SCREAMING_SNAKE_CASE , padding_value=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
A : Optional[int] = num_mel_bins
A : Tuple = do_ceptral_normalize
A : Dict = normalize_means
A : List[Any] = normalize_vars
A : List[str] = True
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
A : List[Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
A : Any = torch.from_numpy(SCREAMING_SNAKE_CASE ).unsqueeze(0 )
A : Any = ta_kaldi.fbank(SCREAMING_SNAKE_CASE , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 0.0 , ) -> np.ndarray:
"""simple docstring"""
if normalize_means:
A : Dict = x[:input_length].mean(axis=0 )
A : Optional[Any] = np.subtract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if normalize_vars:
A : str = x[:input_length].std(axis=0 )
A : int = np.divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if input_length < x.shape[0]:
A : List[str] = padding_value
# make sure array is in float32
A : Tuple = x.astype(np.floataa )
return x
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[np.ndarray]:
"""simple docstring"""
A : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
]
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
A : List[Any] = isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
A : Tuple = is_batched_numpy or (
isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ):
A : Union[str, Any] = np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A : Any = [raw_speech]
# extract fbank features
A : List[str] = [self._extract_fbank_features(SCREAMING_SNAKE_CASE ) for waveform in raw_speech]
# convert into correct format for padding
A : str = BatchFeature({'''input_features''': features} )
A : Union[str, Any] = self.pad(
SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
# make sure list is in array format
A : List[str] = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , SCREAMING_SNAKE_CASE ):
A : str = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features]
A : Union[str, Any] = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
A : Dict = (
np.array(SCREAMING_SNAKE_CASE , dtype=np.intaa )
if self._get_padding_strategies(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE ) is not PaddingStrategy.DO_NOT_PAD
else None
)
A : List[Any] = self.normalize(
padded_inputs['''input_features'''] , attention_mask=SCREAMING_SNAKE_CASE )
if return_tensors is not None:
A : int = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE )
return padded_inputs
| 3 | 0 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
_lowercase : Union[str, Any] = 3
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
print('''Generating primitive root of p''' )
while True:
lowercase_ : Optional[Any] = random.randrange(3 , snake_case__ )
if pow(snake_case__ , 2 , snake_case__ ) == 1:
continue
if pow(snake_case__ , snake_case__ , snake_case__ ) == 1:
continue
return g
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
print('''Generating prime p...''' )
lowercase_ : int = rabin_miller.generate_large_prime(snake_case__ ) # select large prime number.
lowercase_ : List[str] = primitive_root(snake_case__ ) # one primitive root on modulo p.
lowercase_ : Union[str, Any] = random.randrange(3 , snake_case__ ) # private_key -> have to be greater than 2 for safety.
lowercase_ : str = cryptomath.find_mod_inverse(pow(snake_case__ , snake_case__ , snake_case__ ) , snake_case__ )
lowercase_ : Dict = (key_size, e_a, e_a, p)
lowercase_ : Optional[int] = (key_size, d)
return public_key, private_key
def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
if os.path.exists(F'''{name}_pubkey.txt''' ) or os.path.exists(F'''{name}_privkey.txt''' ):
print('''\nWARNING:''' )
print(
F'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
'''Use a different name or delete these files and re-run this program.''' )
sys.exit()
lowercase_ : int = generate_key(snake_case__ )
print(F'''\nWriting public key to file {name}_pubkey.txt...''' )
with open(F'''{name}_pubkey.txt''' , '''w''' ) as fo:
fo.write(F'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''' )
print(F'''Writing private key to file {name}_privkey.txt...''' )
with open(F'''{name}_privkey.txt''' , '''w''' ) as fo:
fo.write(F'''{private_key[0]},{private_key[1]}''' )
def snake_case_ ( ):
"""simple docstring"""
print('''Making key files...''' )
make_key_files('''elgamal''' , 2048 )
print('''Key files generation successful''' )
if __name__ == "__main__":
main()
| 93 |
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
lowercase : str = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
lowercase : str = get_tests_dir('fixtures/vocab.json')
lowercase : int = get_tests_dir('fixtures')
class A ( unittest.TestCase ):
__magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : Tuple = 0
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : List[Any] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A : Union[str, Any] = WavaVecaConfig()
A : List[str] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) )
A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A : Dict = WavaVecaFeatureExtractor()
A : List[str] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# save in new folder
processor.save_pretrained(SCREAMING_SNAKE_CASE )
# drop `processor_class` in tokenizer
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f:
A : Dict = json.load(SCREAMING_SNAKE_CASE )
config_dict.pop('''processor_class''' )
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE ) )
A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A : List[Any] = WavaVecaFeatureExtractor()
A : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# save in new folder
processor.save_pretrained(SCREAMING_SNAKE_CASE )
# drop `processor_class` in feature extractor
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f:
A : str = json.load(SCREAMING_SNAKE_CASE )
config_dict.pop('''processor_class''' )
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE ) )
A : str = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
A : str = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(SCREAMING_SNAKE_CASE )
# copy relevant files
copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) )
# create emtpy sample processor
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f:
f.write('''{}''' )
A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
with self.assertRaises(SCREAMING_SNAKE_CASE ):
A : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(SCREAMING_SNAKE_CASE ):
A : Union[str, Any] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE )
A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
A : List[str] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
A : Tuple = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
A : List[str] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE )
A : int = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
try:
AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE )
AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE ):
AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
A : List[Any] = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
A : Tuple = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' )
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A : Optional[int] = CustomTokenizer(SCREAMING_SNAKE_CASE )
A : Any = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(SCREAMING_SNAKE_CASE )
A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
class A ( __snake_case ):
__magic_name__ = False
class A ( __snake_case ):
__magic_name__ = False
class A ( __snake_case ):
__magic_name__ = '''AutoFeatureExtractor'''
__magic_name__ = '''AutoTokenizer'''
__magic_name__ = False
try:
AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE )
AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local classes.
A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
A : Optional[int] = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
A : Tuple = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : int = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Optional[int] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class A ( unittest.TestCase ):
__magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def __lowerCAmelCase ( cls ) -> Dict:
"""simple docstring"""
A : Optional[int] = TOKEN
HfFolder.save_token(SCREAMING_SNAKE_CASE )
@classmethod
def __lowerCAmelCase ( cls ) -> Any:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Union[str, Any] = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(SCREAMING_SNAKE_CASE , '''test-processor''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token )
A : int = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Tuple = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(SCREAMING_SNAKE_CASE , '''test-processor-org''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token , organization='''valid_org''' , )
A : int = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
A : Any = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
A : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' )
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
A : str = CustomTokenizer(SCREAMING_SNAKE_CASE )
A : Union[str, Any] = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'{USER}/test-dynamic-processor' , token=self._token )
A : List[str] = Repository(SCREAMING_SNAKE_CASE , clone_from=F'{USER}/test-dynamic-processor' , token=self._token )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) ) as f:
A : Dict = json.load(SCREAMING_SNAKE_CASE )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_processing.py''' ) ) )
repo.push_to_hub()
A : Optional[int] = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' , trust_remote_code=SCREAMING_SNAKE_CASE )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
| 3 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class __snake_case ( unittest.TestCase ):
def __a ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ = SamImageProcessor()
SCREAMING_SNAKE_CASE__ = SamProcessor(_lowercase )
processor.save_pretrained(self.tmpdirname )
def __a ( self : Tuple , **_lowercase : Optional[int] ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase ).image_processor
def __a ( self : List[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_lowercase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowercase )
def __a ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = SamProcessor(image_processor=_lowercase )
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = image_processor(_lowercase , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ = processor(images=_lowercase , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = SamProcessor(image_processor=_lowercase )
SCREAMING_SNAKE_CASE__ = [torch.ones((1, 3, 5, 5) )]
SCREAMING_SNAKE_CASE__ = [[17_64, 26_46]]
SCREAMING_SNAKE_CASE__ = [[6_83, 10_24]]
SCREAMING_SNAKE_CASE__ = processor.post_process_masks(_lowercase , _lowercase , _lowercase )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
SCREAMING_SNAKE_CASE__ = processor.post_process_masks(
_lowercase , torch.tensor(_lowercase ) , torch.tensor(_lowercase ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
SCREAMING_SNAKE_CASE__ = [np.ones((1, 3, 5, 5) )]
SCREAMING_SNAKE_CASE__ = processor.post_process_masks(_lowercase , np.array(_lowercase ) , np.array(_lowercase ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
SCREAMING_SNAKE_CASE__ = [[1, 0], [0, 1]]
with self.assertRaises(_lowercase ):
SCREAMING_SNAKE_CASE__ = processor.post_process_masks(_lowercase , np.array(_lowercase ) , np.array(_lowercase ) )
@require_vision
@require_tf
class __snake_case ( unittest.TestCase ):
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ = SamImageProcessor()
SCREAMING_SNAKE_CASE__ = SamProcessor(_lowercase )
processor.save_pretrained(self.tmpdirname )
def __a ( self : Optional[int] , **_lowercase : Dict ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase ).image_processor
def __a ( self : Tuple ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_lowercase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowercase )
def __a ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = SamProcessor(image_processor=_lowercase )
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = image_processor(_lowercase , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ = processor(images=_lowercase , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_tf
def __a ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = SamProcessor(image_processor=_lowercase )
SCREAMING_SNAKE_CASE__ = [tf.ones((1, 3, 5, 5) )]
SCREAMING_SNAKE_CASE__ = [[17_64, 26_46]]
SCREAMING_SNAKE_CASE__ = [[6_83, 10_24]]
SCREAMING_SNAKE_CASE__ = processor.post_process_masks(_lowercase , _lowercase , _lowercase , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
SCREAMING_SNAKE_CASE__ = processor.post_process_masks(
_lowercase , tf.convert_to_tensor(_lowercase ) , tf.convert_to_tensor(_lowercase ) , return_tensors="""tf""" , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
SCREAMING_SNAKE_CASE__ = [np.ones((1, 3, 5, 5) )]
SCREAMING_SNAKE_CASE__ = processor.post_process_masks(
_lowercase , np.array(_lowercase ) , np.array(_lowercase ) , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
SCREAMING_SNAKE_CASE__ = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
SCREAMING_SNAKE_CASE__ = processor.post_process_masks(
_lowercase , np.array(_lowercase ) , np.array(_lowercase ) , return_tensors="""tf""" )
@require_vision
@require_torchvision
class __snake_case ( unittest.TestCase ):
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ = SamImageProcessor()
SCREAMING_SNAKE_CASE__ = SamProcessor(_lowercase )
processor.save_pretrained(self.tmpdirname )
def __a ( self : List[Any] , **_lowercase : Tuple ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase ).image_processor
def __a ( self : Union[str, Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = SamProcessor(image_processor=_lowercase )
SCREAMING_SNAKE_CASE__ = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
SCREAMING_SNAKE_CASE__ = [tf.convert_to_tensor(_lowercase )]
SCREAMING_SNAKE_CASE__ = [torch.tensor(_lowercase )]
SCREAMING_SNAKE_CASE__ = [[17_64, 26_46]]
SCREAMING_SNAKE_CASE__ = [[6_83, 10_24]]
SCREAMING_SNAKE_CASE__ = processor.post_process_masks(
_lowercase , _lowercase , _lowercase , return_tensors="""tf""" )
SCREAMING_SNAKE_CASE__ = processor.post_process_masks(
_lowercase , _lowercase , _lowercase , return_tensors="""pt""" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_image_processor()
SCREAMING_SNAKE_CASE__ = SamProcessor(image_processor=_lowercase )
SCREAMING_SNAKE_CASE__ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ = image_processor(_lowercase , return_tensors="""pt""" )['''pixel_values'''].numpy()
SCREAMING_SNAKE_CASE__ = processor(images=_lowercase , return_tensors="""pt""" )['''pixel_values'''].numpy()
SCREAMING_SNAKE_CASE__ = image_processor(_lowercase , return_tensors="""tf""" )['''pixel_values'''].numpy()
SCREAMING_SNAKE_CASE__ = processor(images=_lowercase , return_tensors="""tf""" )['''pixel_values'''].numpy()
self.assertTrue(np.allclose(_lowercase , _lowercase ) )
self.assertTrue(np.allclose(_lowercase , _lowercase ) )
self.assertTrue(np.allclose(_lowercase , _lowercase ) )
| 219 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
lowercase : Optional[Any] = None
lowercase : Tuple = logging.get_logger(__name__)
lowercase : Dict = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
lowercase : Tuple = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
'tokenizer_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
},
}
lowercase : List[str] = {
'google/rembert': 2_56,
}
lowercase : Dict = '▁'
class A ( __snake_case ):
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = RemBertTokenizer
def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[MASK]" , **SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
A : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
A : List[Any] = do_lower_case
A : str = remove_space
A : int = keep_accents
A : Union[str, Any] = vocab_file
A : List[Any] = False if not self.vocab_file else True
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]:
"""simple docstring"""
A : List[Any] = [self.sep_token_id]
A : Tuple = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]:
"""simple docstring"""
A : Tuple = [self.sep_token_id]
A : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE ) )
return
A : Any = os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 3 | 0 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCAmelCase :
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=16 , _UpperCamelCase=36 , _UpperCamelCase=6 , _UpperCamelCase=6 , _UpperCamelCase=6 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=4 , _UpperCamelCase=None , ) -> Dict:
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_input_mask
lowerCAmelCase_ = use_token_type_ids
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = embedding_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_hidden_groups
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = type_vocab_size
lowerCAmelCase_ = type_sequence_label_size
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = num_labels
lowerCAmelCase_ = num_choices
lowerCAmelCase_ = scope
def __a ( self ) -> int:
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ = None
if self.use_input_mask:
lowerCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ = None
if self.use_token_type_ids:
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase_ = None
lowerCAmelCase_ = None
lowerCAmelCase_ = None
if self.use_labels:
lowerCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self ) -> Union[str, Any]:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[str]:
lowerCAmelCase_ = AlbertModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
lowerCAmelCase_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
lowerCAmelCase_ = model(_UpperCamelCase , token_type_ids=_UpperCamelCase )
lowerCAmelCase_ = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
lowerCAmelCase_ = AlbertForPreTraining(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
lowerCAmelCase_ = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase , sentence_order_label=_UpperCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> str:
lowerCAmelCase_ = AlbertForMaskedLM(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
lowerCAmelCase_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple:
lowerCAmelCase_ = AlbertForQuestionAnswering(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
lowerCAmelCase_ = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , start_positions=_UpperCamelCase , end_positions=_UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[str]:
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = AlbertForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
lowerCAmelCase_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Union[str, Any]:
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = AlbertForTokenClassification(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
lowerCAmelCase_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[str]:
lowerCAmelCase_ = self.num_choices
lowerCAmelCase_ = AlbertForMultipleChoice(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
lowerCAmelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = self.prepare_config_and_inputs()
(
lowerCAmelCase_
) = config_and_inputs
lowerCAmelCase_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __snake_case , __snake_case , unittest.TestCase ):
_lowercase =(
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
_lowercase =(
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowercase =True
def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ) -> Tuple:
lowerCAmelCase_ = super()._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
if return_labels:
if model_class in get_values(_UpperCamelCase ):
lowerCAmelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCamelCase )
lowerCAmelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCamelCase )
return inputs_dict
def __a ( self ) -> Dict:
lowerCAmelCase_ = AlbertModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37 )
def __a ( self ) -> int:
self.config_tester.run_common_tests()
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCamelCase )
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCamelCase )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCamelCase )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase )
def __a ( self ) -> Dict:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCamelCase )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase_ = type
self.model_tester.create_and_check_model(*_UpperCamelCase )
@slow
def __a ( self ) -> Tuple:
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ = AlbertModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def __a ( self ) -> List[str]:
lowerCAmelCase_ = AlbertModel.from_pretrained("albert-base-v2" )
lowerCAmelCase_ = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
lowerCAmelCase_ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCAmelCase_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )[0]
lowerCAmelCase_ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _UpperCamelCase )
lowerCAmelCase_ = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCamelCase , atol=1e-4 ) )
| 231 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase : Optional[Any] = logging.get_logger(__name__)
class A ( __snake_case ):
__magic_name__ = ['''pixel_values''']
def __init__( self , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 1 / 255 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
A : str = size if size is not None else {'''shortest_edge''': 384}
A : Tuple = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
A : str = do_resize
A : List[Any] = size
# Default value set here for backwards compatibility where the value in config is None
A : List[Any] = crop_pct if crop_pct is not None else 224 / 256
A : Optional[int] = resample
A : Union[str, Any] = do_rescale
A : List[str] = rescale_factor
A : Union[str, Any] = do_normalize
A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
A : str = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
A : Any = size['''shortest_edge''']
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
A : Dict = int(shortest_edge / crop_pct )
A : str = get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
A : int = resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE , ) -> PIL.Image.Image:
"""simple docstring"""
A : int = do_resize if do_resize is not None else self.do_resize
A : Tuple = crop_pct if crop_pct is not None else self.crop_pct
A : Optional[Any] = resample if resample is not None else self.resample
A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
A : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
A : List[str] = image_std if image_std is not None else self.image_std
A : Union[str, Any] = size if size is not None else self.size
A : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
A : Any = make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError('''crop_pct must be specified if size < 384.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
A : Optional[int] = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
A : Any = [self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , crop_pct=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
A : str = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
A : Dict = [self.normalize(image=SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE ) for image in images]
A : Any = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
A : Optional[int] = {'''pixel_values''': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
| 3 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase (metaclass=__snake_case ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = ["""flax"""]
def __init__(self : Union[str, Any] , *UpperCamelCase : List[Any] , **UpperCamelCase : List[Any] ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def UpperCamelCase__ (cls : Optional[Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def UpperCamelCase__ (cls : List[Any] , *UpperCamelCase : int , **UpperCamelCase : str ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class __lowerCAmelCase (metaclass=__snake_case ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = ["""flax"""]
def __init__(self : Dict , *UpperCamelCase : List[Any] , **UpperCamelCase : int ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def UpperCamelCase__ (cls : str , *UpperCamelCase : Dict , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def UpperCamelCase__ (cls : str , *UpperCamelCase : List[str] , **UpperCamelCase : int ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class __lowerCAmelCase (metaclass=__snake_case ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = ["""flax"""]
def __init__(self : str , *UpperCamelCase : Dict , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def UpperCamelCase__ (cls : Tuple , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : str ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def UpperCamelCase__ (cls : Optional[Any] , *UpperCamelCase : List[Any] , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class __lowerCAmelCase (metaclass=__snake_case ):
'''simple docstring'''
lowerCAmelCase__ : int = ["""flax"""]
def __init__(self : Optional[Any] , *UpperCamelCase : List[Any] , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def UpperCamelCase__ (cls : Optional[Any] , *UpperCamelCase : Tuple , **UpperCamelCase : Any ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def UpperCamelCase__ (cls : List[Any] , *UpperCamelCase : List[Any] , **UpperCamelCase : int ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class __lowerCAmelCase (metaclass=__snake_case ):
'''simple docstring'''
lowerCAmelCase__ : Any = ["""flax"""]
def __init__(self : Union[str, Any] , *UpperCamelCase : str , **UpperCamelCase : List[Any] ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def UpperCamelCase__ (cls : List[str] , *UpperCamelCase : int , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def UpperCamelCase__ (cls : List[Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class __lowerCAmelCase (metaclass=__snake_case ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = ["""flax"""]
def __init__(self : str , *UpperCamelCase : int , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def UpperCamelCase__ (cls : Optional[int] , *UpperCamelCase : Dict , **UpperCamelCase : Any ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def UpperCamelCase__ (cls : str , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class __lowerCAmelCase (metaclass=__snake_case ):
'''simple docstring'''
lowerCAmelCase__ : int = ["""flax"""]
def __init__(self : int , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def UpperCamelCase__ (cls : Union[str, Any] , *UpperCamelCase : Tuple , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def UpperCamelCase__ (cls : Optional[Any] , *UpperCamelCase : str , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class __lowerCAmelCase (metaclass=__snake_case ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = ["""flax"""]
def __init__(self : List[str] , *UpperCamelCase : int , **UpperCamelCase : Dict ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def UpperCamelCase__ (cls : Any , *UpperCamelCase : Dict , **UpperCamelCase : Dict ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def UpperCamelCase__ (cls : List[str] , *UpperCamelCase : int , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class __lowerCAmelCase (metaclass=__snake_case ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = ["""flax"""]
def __init__(self : List[str] , *UpperCamelCase : Dict , **UpperCamelCase : Any ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def UpperCamelCase__ (cls : Any , *UpperCamelCase : Any , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def UpperCamelCase__ (cls : Dict , *UpperCamelCase : Tuple , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class __lowerCAmelCase (metaclass=__snake_case ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = ["""flax"""]
def __init__(self : List[Any] , *UpperCamelCase : int , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def UpperCamelCase__ (cls : Any , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def UpperCamelCase__ (cls : int , *UpperCamelCase : Optional[int] , **UpperCamelCase : str ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class __lowerCAmelCase (metaclass=__snake_case ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = ["""flax"""]
def __init__(self : Optional[Any] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Dict ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def UpperCamelCase__ (cls : Any , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def UpperCamelCase__ (cls : Any , *UpperCamelCase : str , **UpperCamelCase : Dict ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class __lowerCAmelCase (metaclass=__snake_case ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = ["""flax"""]
def __init__(self : Dict , *UpperCamelCase : List[str] , **UpperCamelCase : Tuple ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def UpperCamelCase__ (cls : List[Any] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def UpperCamelCase__ (cls : Optional[int] , *UpperCamelCase : str , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
class __lowerCAmelCase (metaclass=__snake_case ):
'''simple docstring'''
lowerCAmelCase__ : int = ["""flax"""]
def __init__(self : Dict , *UpperCamelCase : Dict , **UpperCamelCase : List[str] ):
'''simple docstring'''
requires_backends(self , ['''flax'''] )
@classmethod
def UpperCamelCase__ (cls : List[Any] , *UpperCamelCase : List[str] , **UpperCamelCase : Any ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
@classmethod
def UpperCamelCase__ (cls : List[str] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Dict ):
'''simple docstring'''
requires_backends(cls , ['''flax'''] )
| 2 |
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(SCREAMING_SNAKE_CASE ):
A : int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : List[str] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@slow
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(SCREAMING_SNAKE_CASE ):
A : Any = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Any = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@slow
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
for model_name in ["bert-base-cased", "bert-large-uncased"]:
A : Optional[int] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
A : List[str] = FlaxBertModel.from_pretrained(SCREAMING_SNAKE_CASE )
A : Optional[Any] = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**SCREAMING_SNAKE_CASE ):
return model(**SCREAMING_SNAKE_CASE )
eval(**SCREAMING_SNAKE_CASE ).block_until_ready()
@slow
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
for model_name in ["roberta-base", "roberta-large"]:
A : List[str] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
A : Union[str, Any] = FlaxRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE )
A : int = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**SCREAMING_SNAKE_CASE ):
return model(**SCREAMING_SNAKE_CASE )
eval(**SCREAMING_SNAKE_CASE ).block_until_ready()
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE , '''bert-base is not a local folder and is not a valid model identifier''' ):
A : List[Any] = FlaxAutoModel.from_pretrained('''bert-base''' )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
A : Optional[int] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE , revision='''aaaaaa''' )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
with self.assertRaisesRegex(
SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ):
A : List[str] = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE , '''Use `from_pt=True` to load this model''' ):
A : Any = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
| 3 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
lowerCAmelCase = '''ZinengTang/tvlt-base'''
lowerCAmelCase = tempfile.mkdtemp()
def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->Optional[int]:
return TvltImageProcessor.from_pretrained(self.checkpoint , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , **__SCREAMING_SNAKE_CASE ) ->str:
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_feature_extractor()
lowerCAmelCase = TvltProcessor(image_processor=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_feature_extractor()
lowerCAmelCase = TvltProcessor(image_processor=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.ones([12000] )
lowerCAmelCase = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
lowerCAmelCase = processor(audio=__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_feature_extractor()
lowerCAmelCase = TvltProcessor(image_processor=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.ones([3, 224, 224] )
lowerCAmelCase = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
lowerCAmelCase = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_feature_extractor()
lowerCAmelCase = TvltProcessor(image_processor=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.ones([12000] )
lowerCAmelCase = np.ones([3, 224, 224] )
lowerCAmelCase = processor(audio=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(__SCREAMING_SNAKE_CASE ):
processor()
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_feature_extractor()
lowerCAmelCase = TvltProcessor(image_processor=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 338 |
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowercase : Union[str, Any] = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
lowercase : Tuple = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def lowerCAmelCase_ ( snake_case__ , snake_case__=False ):
'''simple docstring'''
A, A : Tuple = create_model(
'''HTSAT-tiny''' , '''roberta''' , snake_case__ , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=snake_case__ , fusion_type='''aff_2d''' if enable_fusion else None , )
return model, model_cfg
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : Dict = {}
A : str = R'''.*sequential.(\d+).*'''
A : Union[str, Any] = R'''.*_projection.(\d+).*'''
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
A : Any = key.replace(snake_case__ , snake_case__ )
if re.match(snake_case__ , snake_case__ ):
# replace sequential layers with list
A : Any = re.match(snake_case__ , snake_case__ ).group(1 )
A : List[str] = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(snake_case__ )//3}.linear.' )
elif re.match(snake_case__ , snake_case__ ):
A : Union[str, Any] = int(re.match(snake_case__ , snake_case__ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
A : str = 1 if projecton_layer == 0 else 2
A : Optional[Any] = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' )
if "audio" and "qkv" in key:
# split qkv into query key and value
A : int = value
A : List[Any] = mixed_qkv.size(0 ) // 3
A : Union[str, Any] = mixed_qkv[:qkv_dim]
A : Optional[int] = mixed_qkv[qkv_dim : qkv_dim * 2]
A : Optional[int] = mixed_qkv[qkv_dim * 2 :]
A : Tuple = query_layer
A : Union[str, Any] = key_layer
A : Optional[int] = value_layer
else:
A : Dict = value
return model_state_dict
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ):
'''simple docstring'''
A, A : int = init_clap(snake_case__ , enable_fusion=snake_case__ )
clap_model.eval()
A : str = clap_model.state_dict()
A : Union[str, Any] = rename_state_dict(snake_case__ )
A : Tuple = ClapConfig()
A : str = enable_fusion
A : str = ClapModel(snake_case__ )
# ignore the spectrogram embedding layer
model.load_state_dict(snake_case__ , strict=snake_case__ )
model.save_pretrained(snake_case__ )
transformers_config.save_pretrained(snake_case__ )
if __name__ == "__main__":
lowercase : List[str] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
lowercase : Tuple = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 3 | 0 |
from math import factorial
__snake_case : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def _UpperCAmelCase ( a__):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__):
raise TypeError("""Parameter number must be int""")
if number < 0:
raise ValueError("""Parameter number must be greater than or equal to 0""")
# Converts number in string to iterate on its digits and adds its factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(snake_case__))
def _UpperCAmelCase ( a__ = 6_0 , a__ = 1_0_0_0_0_0_0):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__) or not isinstance(snake_case__ , snake_case__):
raise TypeError("""Parameters chain_length and number_limit must be int""")
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
"""Parameters chain_length and number_limit must be greater than 0""")
# the counter for the chains with the exact desired length
a_ : int = 0
# the cached sizes of the previous chains
a_ : dict[int, int] = {}
for start_chain_element in range(1 , snake_case__):
# The temporary set will contain the elements of the chain
a_ : int = set()
a_ : List[Any] = 0
# Stop computing the chain when you find a cached size, a repeating item or the
# length is greater then the desired one.
a_ : Union[str, Any] = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(snake_case__)
chain_set_length += 1
a_ : Tuple = digit_factorial_sum(snake_case__)
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
a_ : Dict = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution()}""")
| 248 |
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
lowercase : Dict = logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
A : Union[str, Any] = os.path.abspath(snake_case__ )
logger.info(F'Loading PyTorch weights from {pt_path}' )
A : Any = torch.load(snake_case__ , map_location='''cpu''' )
logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
A : List[str] = convert_pytorch_state_dict_to_flax(snake_case__ , snake_case__ )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
A : Any = convert_pytorch_sharded_state_dict_to_flax(snake_case__ , snake_case__ )
return flax_state_dict
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(snake_case__ ) -> bool:
return len(set(snake_case__ ) & {key, (model_prefix,) + key} ) > 0
# layer norm
A : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(snake_case__ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
A : Tuple = pt_tuple_key[:-1] + ('''mean''',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(snake_case__ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
A : Dict = pt_tuple_key[:-1] + ('''var''',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(snake_case__ ):
return renamed_pt_tuple_key, pt_tensor
# embedding
A : Any = pt_tuple_key[:-1] + ('''embedding''',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(snake_case__ ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
A : Optional[Any] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(snake_case__ ):
A : List[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
A : Optional[int] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(snake_case__ ):
A : str = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
A : Dict = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
A : List[Any] = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
A : Dict = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
A : List[Any] = pt_tuple_key[-2] + '''_g'''
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
A : List[str] = pt_tuple_key[-2] + '''_v'''
if name is not None:
A : int = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
A : Dict = {k: v.numpy() for k, v in pt_state_dict.items()}
A : int = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
A : List[str] = flax_model.params['''params''']
else:
A : Dict = flax_model.params
A : List[Any] = flatten_dict(snake_case__ )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
A : List[str] = flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(snake_case__ )
A : int = {}
A : List[str] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
A : int = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A : str = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
A : Union[str, Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
A : Any = pt_tuple_key[1:]
# Correctly rename weight parameters
A, A : Dict = rename_key_and_reshape_tensor(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# add model prefix if necessary
A : Any = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
A : int = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
A : Tuple = jnp.asarray(snake_case__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(snake_case__ , snake_case__ )
continue
# also add unexpected weight so that warning is thrown
A : List[str] = jnp.asarray(snake_case__ )
else:
# also add unexpected weight so that warning is thrown
A : Optional[Any] = jnp.asarray(snake_case__ )
return unflatten_dict(snake_case__ )
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
import torch
# Load the index
A : Union[str, Any] = {}
for shard_file in shard_filenames:
# load using msgpack utils
A : List[str] = torch.load(snake_case__ )
A : int = {k: v.numpy() for k, v in pt_state_dict.items()}
A : Tuple = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
A : Optional[int] = flax_model.params['''params''']
A : List[Any] = flatten_dict(snake_case__ )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
A : Dict = flax_model.params
A : Tuple = flatten_dict(snake_case__ )
A : List[str] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
A : List[str] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A : int = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
A : List[str] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
A : Union[str, Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
A, A : Any = rename_key_and_reshape_tensor(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# add model prefix if necessary
A : int = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
A : int = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
A : Optional[int] = jnp.asarray(snake_case__ )
continue
if "var" in flax_key[-1]:
A : Optional[int] = jnp.asarray(snake_case__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(snake_case__ , snake_case__ )
continue
# also add unexpected weight so that warning is thrown
A : Optional[Any] = jnp.asarray(snake_case__ )
else:
# also add unexpected weight so that warning is thrown
A : Optional[Any] = jnp.asarray(snake_case__ )
return unflatten_dict(snake_case__ )
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
A : Dict = os.path.abspath(snake_case__ )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
A : List[str] = getattr(snake_case__ , '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(snake_case__ , '''rb''' ) as state_f:
try:
A : int = from_bytes(snake_case__ , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ )
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
A : List[str] = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values()
if any(snake_case__ ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
A : Optional[Any] = jax.tree_util.tree_map(
lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ )
A : Union[str, Any] = flatten_dict(snake_case__ )
A : List[Any] = pt_model.state_dict()
A : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
A : Tuple = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
A : int = []
A : Any = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
A : Union[str, Any] = flax_key_tuple[0] == pt_model.base_model_prefix
A : int = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
A : List[str] = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
A : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case__ ) not in pt_model_dict:
# conv layer
A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',)
A : Optional[int] = jnp.transpose(snake_case__ , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ) not in pt_model_dict:
# linear layer
A : Tuple = flax_key_tuple[:-1] + ('''weight''',)
A : Tuple = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
A : Tuple = flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
A : Tuple = flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
A : List[Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
A : Union[str, Any] = '''.'''.join(snake_case__ )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
A : int = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
A : Optional[int] = key.split('''.''' )
A : Dict = None
if key_components[-3::2] == ["parametrizations", "original0"]:
A : List[str] = key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
A : List[Any] = key_components[-2] + '''_v'''
if name is not None:
A : str = key_components[:-3] + [name]
A : Optional[Any] = '''.'''.join(snake_case__ )
A : Optional[Any] = key
if flax_key in special_pt_names:
A : Optional[Any] = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
A : Dict = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor
A : Dict = torch.from_numpy(snake_case__ )
# remove from missing keys
missing_keys.remove(snake_case__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(snake_case__ )
pt_model.load_state_dict(snake_case__ )
# re-transform missing_keys to list
A : List[Any] = list(snake_case__ )
if len(snake_case__ ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(snake_case__ ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
''' use it for predictions and inference.''' )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
'''If your task is similar to the task the model of the checkpoint was trained on, '''
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
| 3 | 0 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class __snake_case ( __snake_case ):
_a : Union[str, Any]= ["image_processor", "tokenizer"]
_a : List[str]= "BlipImageProcessor"
_a : Tuple= "AutoTokenizer"
def __init__( self ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
super().__init__(snake_case ,snake_case )
# add QFormer tokenizer
lowercase : Tuple = qformer_tokenizer
def __call__( self ,snake_case = None ,snake_case = None ,snake_case = True ,snake_case = False ,snake_case = None ,snake_case = None ,snake_case = 0 ,snake_case = None ,snake_case = None ,snake_case = False ,snake_case = False ,snake_case = False ,snake_case = False ,snake_case = False ,snake_case = True ,snake_case = None ,**snake_case ,):
'''simple docstring'''
if images is None and text is None:
raise ValueError("""You have to specify at least images or text.""" )
lowercase : Tuple = BatchFeature()
if text is not None:
lowercase : Tuple = self.tokenizer(
text=snake_case ,add_special_tokens=snake_case ,padding=snake_case ,truncation=snake_case ,max_length=snake_case ,stride=snake_case ,pad_to_multiple_of=snake_case ,return_attention_mask=snake_case ,return_overflowing_tokens=snake_case ,return_special_tokens_mask=snake_case ,return_offsets_mapping=snake_case ,return_token_type_ids=snake_case ,return_length=snake_case ,verbose=snake_case ,return_tensors=snake_case ,**snake_case ,)
encoding.update(snake_case )
lowercase : List[str] = self.qformer_tokenizer(
text=snake_case ,add_special_tokens=snake_case ,padding=snake_case ,truncation=snake_case ,max_length=snake_case ,stride=snake_case ,pad_to_multiple_of=snake_case ,return_attention_mask=snake_case ,return_overflowing_tokens=snake_case ,return_special_tokens_mask=snake_case ,return_offsets_mapping=snake_case ,return_token_type_ids=snake_case ,return_length=snake_case ,verbose=snake_case ,return_tensors=snake_case ,**snake_case ,)
lowercase : Optional[Any] = qformer_text_encoding.pop("""input_ids""" )
lowercase : List[str] = qformer_text_encoding.pop("""attention_mask""" )
if images is not None:
lowercase : List[Any] = self.image_processor(snake_case ,return_tensors=snake_case )
encoding.update(snake_case )
return encoding
def _SCREAMING_SNAKE_CASE ( self ,*snake_case ,**snake_case ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case ,**snake_case )
def _SCREAMING_SNAKE_CASE ( self ,*snake_case ,**snake_case ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case ,**snake_case )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Union[str, Any] = self.tokenizer.model_input_names
lowercase : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,**snake_case ):
'''simple docstring'''
if os.path.isfile(snake_case ):
raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file" )
os.makedirs(snake_case ,exist_ok=snake_case )
lowercase : int = os.path.join(snake_case ,"""qformer_tokenizer""" )
self.qformer_tokenizer.save_pretrained(snake_case )
return super().save_pretrained(snake_case ,**snake_case )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls ,snake_case ,**snake_case ):
'''simple docstring'''
lowercase : int = AutoTokenizer.from_pretrained(snake_case ,subfolder="""qformer_tokenizer""" )
lowercase : Union[str, Any] = cls._get_arguments_from_pretrained(snake_case ,**snake_case )
args.append(snake_case )
return cls(*snake_case )
| 20 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
lowercase : Optional[int] = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
lowercase : Optional[Any] = {
'169M': 7_68,
'430M': 10_24,
'1B5': 20_48,
'3B': 25_60,
'7B': 40_96,
'14B': 51_20,
}
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : Optional[Any] = list(state_dict.keys() )
for name in state_dict_keys:
A : str = state_dict.pop(snake_case__ )
# emb -> embedding
if name.startswith('''emb.''' ):
A : Optional[Any] = name.replace('''emb.''' , '''embeddings.''' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('''blocks.0.ln0''' ):
A : Union[str, Any] = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' )
# att -> attention
A : int = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , snake_case__ )
# ffn -> feed_forward
A : List[Any] = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , snake_case__ )
# time_mix_k -> time_mix_key and reshape
if name.endswith('''.time_mix_k''' ):
A : List[str] = name.replace('''.time_mix_k''' , '''.time_mix_key''' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('''.time_mix_v''' ):
A : Union[str, Any] = name.replace('''.time_mix_v''' , '''.time_mix_value''' )
# time_mix_r -> time_mix_key and reshape
if name.endswith('''.time_mix_r''' ):
A : Union[str, Any] = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' )
if name != "head.weight":
A : List[Any] = '''rwkv.''' + name
A : Dict = weight
return state_dict
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=False , snake_case__=None ):
'''simple docstring'''
if tokenizer_file is None:
print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' )
A : int = 5_0277
A : Optional[int] = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' )
else:
A : str = PreTrainedTokenizerFast(tokenizer_file=snake_case__ )
A : Any = len(snake_case__ )
tokenizer.save_pretrained(snake_case__ )
# 2. Build the config
A : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
A : List[str] = candidate
break
if size is None:
raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' )
if size not in possible_sizes:
raise ValueError(F'`size` should be one of {possible_sizes}, got {size}.' )
A : Any = RwkvConfig(
vocab_size=snake_case__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(snake_case__ )
# 3. Download model file then convert state_dict
A : Union[str, Any] = hf_hub_download(snake_case__ , snake_case__ )
A : Tuple = torch.load(snake_case__ , map_location='''cpu''' )
A : List[Any] = convert_state_dict(snake_case__ )
# 4. Split in shards and save
A, A : List[str] = shard_checkpoint(snake_case__ )
for shard_file, shard in shards.items():
torch.save(snake_case__ , os.path.join(snake_case__ , snake_case__ ) )
if index is not None:
A : Dict = os.path.join(snake_case__ , snake_case__ )
# Save the index as well
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
A : List[Any] = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '''\n'''
f.write(snake_case__ )
# 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
print(
'''Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.''' )
A : List[Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
A : Union[str, Any] = torch.load(os.path.join(snake_case__ , snake_case__ ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(snake_case__ , snake_case__ ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' )
A : int = AutoModelForCausalLM.from_pretrained(snake_case__ )
model.push_to_hub(snake_case__ , max_shard_size='''2GB''' )
tokenizer.push_to_hub(snake_case__ )
if __name__ == "__main__":
lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
lowercase : Union[str, Any] = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 3 | 0 |
def SCREAMING_SNAKE_CASE__ ( __a = 10_00 ):
snake_case_ : int = 2**power
snake_case_ : List[Any] = str(snake_case__ )
snake_case_ : List[str] = list(snake_case__ )
snake_case_ : List[str] = 0
for i in list_num:
sum_of_num += int(snake_case__ )
return sum_of_num
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = int(input("""Enter the power of 2: """).strip())
print("""2 ^ """, power, """ = """, 2**power)
_SCREAMING_SNAKE_CASE = solution(power)
print("""Sum of the digits is: """, result)
| 327 |
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
lowercase : str = logging.get_logger(__name__)
@add_end_docstrings(__snake_case )
class A ( __snake_case ):
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
self.check_model_type(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A, A : Dict = {}, {}
if padding is not None:
A : List[str] = padding
if truncation is not None:
A : Dict = truncation
if top_k is not None:
A : Optional[Any] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE , (Image.Image, str) ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A : int = {'''image''': image, '''question''': question}
else:
A : Any = image
A : Any = super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
return results
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> Any:
"""simple docstring"""
A : Union[str, Any] = load_image(inputs['''image'''] )
A : Optional[Any] = self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE )
A : Dict = self.image_processor(images=SCREAMING_SNAKE_CASE , return_tensors=self.framework )
model_inputs.update(SCREAMING_SNAKE_CASE )
return model_inputs
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
A : List[Any] = self.model(**SCREAMING_SNAKE_CASE )
return model_outputs
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=5 ) -> int:
"""simple docstring"""
if top_k > self.model.config.num_labels:
A : Dict = self.model.config.num_labels
if self.framework == "pt":
A : Optional[int] = model_outputs.logits.sigmoid()[0]
A, A : int = probs.topk(SCREAMING_SNAKE_CASE )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
A : int = scores.tolist()
A : List[str] = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )]
| 3 | 0 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
_snake_case = (720, 1280) # Height, Width
_snake_case = (0.4, 0.6) # if height or width lower than this scale, drop it.
_snake_case = 1 / 100
_snake_case = ''
_snake_case = ''
_snake_case = ''
_snake_case = 250
def lowerCAmelCase__ ( ):
'''simple docstring'''
_a : Dict = get_dataset(snake_case__ , snake_case__ )
for index in range(snake_case__ ):
_a : Tuple = random.sample(range(len(snake_case__ ) ) , 4 )
_a : List[str] = update_image_and_anno(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , filter_scale=snake_case__ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_a : Union[str, Any] = random_chars(3_2 )
_a : Any = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
_a : Union[str, Any] = F"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cva.imwrite(F"""{file_root}.jpg""" , snake_case__ , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
print(F"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
_a : Dict = []
for anno in new_annos:
_a : Optional[int] = anno[3] - anno[1]
_a : int = anno[4] - anno[2]
_a : str = anno[1] + width / 2
_a : List[str] = anno[2] + height / 2
_a : Dict = F"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(snake_case__ )
with open(F"""{file_root}.txt""" , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Dict = []
_a : Dict = []
for label_file in glob.glob(os.path.join(snake_case__ , """*.txt""" ) ):
_a : List[str] = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(snake_case__ ) as in_file:
_a : Optional[Any] = in_file.readlines()
_a : Optional[Any] = os.path.join(snake_case__ , F"""{label_name}.jpg""" )
_a : Tuple = []
for obj_list in obj_lists:
_a : List[Any] = obj_list.rstrip("""\n""" ).split(""" """ )
_a : str = float(obj[1] ) - float(obj[3] ) / 2
_a : List[Any] = float(obj[2] ) - float(obj[4] ) / 2
_a : int = float(obj[1] ) + float(obj[3] ) / 2
_a : Optional[Any] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(snake_case__ )
labels.append(snake_case__ )
return img_paths, labels
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 0.0 , ):
'''simple docstring'''
_a : List[str] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
_a : List[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_a : Optional[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_a : Optional[Any] = int(scale_x * output_size[1] )
_a : Union[str, Any] = int(scale_y * output_size[0] )
_a : Union[str, Any] = []
_a : Union[str, Any] = []
for i, index in enumerate(snake_case__ ):
_a : List[Any] = all_img_list[index]
path_list.append(snake_case__ )
_a : Tuple = all_annos[index]
_a : Optional[Any] = cva.imread(snake_case__ )
if i == 0: # top-left
_a : Optional[Any] = cva.resize(snake_case__ , (divid_point_x, divid_point_y) )
_a : Optional[Any] = img
for bbox in img_annos:
_a : Union[str, Any] = bbox[1] * scale_x
_a : Optional[Any] = bbox[2] * scale_y
_a : int = bbox[3] * scale_x
_a : Optional[int] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_a : Any = cva.resize(snake_case__ , (output_size[1] - divid_point_x, divid_point_y) )
_a : Optional[Any] = img
for bbox in img_annos:
_a : Optional[Any] = scale_x + bbox[1] * (1 - scale_x)
_a : Tuple = bbox[2] * scale_y
_a : Any = scale_x + bbox[3] * (1 - scale_x)
_a : List[str] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_a : Optional[Any] = cva.resize(snake_case__ , (divid_point_x, output_size[0] - divid_point_y) )
_a : Any = img
for bbox in img_annos:
_a : Optional[Any] = bbox[1] * scale_x
_a : List[str] = scale_y + bbox[2] * (1 - scale_y)
_a : Optional[Any] = bbox[3] * scale_x
_a : Any = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_a : List[str] = cva.resize(
snake_case__ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_a : str = img
for bbox in img_annos:
_a : Dict = scale_x + bbox[1] * (1 - scale_x)
_a : int = scale_y + bbox[2] * (1 - scale_y)
_a : List[Any] = scale_x + bbox[3] * (1 - scale_x)
_a : Any = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
_a : List[str] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
assert number_char > 1, "The number of character should greater than 1"
_a : int = ascii_lowercase + digits
return "".join(random.choice(snake_case__ ) for _ in range(snake_case__ ) )
if __name__ == "__main__":
main()
print('DONE ✅')
| 294 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase : Union[str, Any] = logging.get_logger(__name__)
lowercase : str = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class A ( __snake_case ):
__magic_name__ = '''bert'''
def __init__( self , SCREAMING_SNAKE_CASE=30522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
A : Optional[int] = vocab_size
A : Optional[Any] = hidden_size
A : List[Any] = num_hidden_layers
A : List[str] = num_attention_heads
A : Dict = hidden_act
A : Optional[Any] = intermediate_size
A : List[Any] = hidden_dropout_prob
A : List[Any] = attention_probs_dropout_prob
A : Optional[Any] = max_position_embeddings
A : List[str] = type_vocab_size
A : Dict = initializer_range
A : str = layer_norm_eps
A : int = position_embedding_type
A : Dict = use_cache
A : str = classifier_dropout
class A ( __snake_case ):
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
A : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
A : Optional[int] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 3 | 0 |
"""simple docstring"""
from collections.abc import Callable
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : str ,_lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
_lowerCAmelCase : float = a
_lowerCAmelCase : float = b
if function(snake_case__ ) == 0: # one of the a or b is a root for the function
return a
elif function(snake_case__ ) == 0:
return b
elif (
function(snake_case__ ) * function(snake_case__ ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError("""could not find root in given interval.""" )
else:
_lowerCAmelCase : float = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
if function(snake_case__ ) == 0:
return mid
elif function(snake_case__ ) * function(snake_case__ ) < 0:
_lowerCAmelCase : Union[str, Any] = mid
else:
_lowerCAmelCase : Optional[Any] = mid
_lowerCAmelCase : Optional[int] = start + (end - start) / 2.0
return mid
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> int:
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
| 44 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
A : str = BeautifulSoup(requests.get(snake_case__ , params=snake_case__ ).content , '''html.parser''' )
A : Dict = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
A : Optional[int] = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
return anchors[2].get_text()
if __name__ == "__main__":
lowercase : str = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 20_18,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 3 | 0 |
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : str = (KDPMaDiscreteScheduler,)
lowerCamelCase : Optional[int] = 10
def __UpperCAmelCase ( self : List[Any] , **UpperCAmelCase__ : Optional[int] ) -> Any:
lowerCAmelCase = {
'num_train_timesteps': 1_1_0_0,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**UpperCAmelCase__ )
return config
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase__ )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=UpperCAmelCase__ , beta_end=UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=UpperCAmelCase__ )
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase__ )
def __UpperCAmelCase ( self : int ) -> Dict:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCAmelCase = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase = sample.to(UpperCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = output.prev_sample
lowerCAmelCase = torch.sum(torch.abs(UpperCAmelCase__ ) )
lowerCAmelCase = torch.mean(torch.abs(UpperCAmelCase__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.69_34E-07 ) < 1E-2
assert abs(result_mean.item() - 6.11_12E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0_002 ) < 1E-3
def __UpperCAmelCase ( self : Tuple ) -> Dict:
if torch_device == "mps":
return
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase = sample.to(UpperCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = output.prev_sample
lowerCAmelCase = torch.sum(torch.abs(UpperCAmelCase__ ) )
lowerCAmelCase = torch.mean(torch.abs(UpperCAmelCase__ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
if torch_device == "mps":
return
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**UpperCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase__ )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter.to(UpperCAmelCase__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCAmelCase = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = output.prev_sample
lowerCAmelCase = torch.sum(torch.abs(UpperCAmelCase__ ) )
lowerCAmelCase = torch.mean(torch.abs(UpperCAmelCase__ ) )
if str(UpperCAmelCase__ ).startswith('cpu' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
| 4 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Union[str, Any] = '''speech_to_text_2'''
lowerCamelCase : Any = ['''past_key_values''']
lowerCamelCase : Optional[Any] = {'''num_attention_heads''': '''decoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[Any]=1_0_0_0_0 , UpperCAmelCase__ : int=6 , UpperCAmelCase__ : Optional[Any]=2_0_4_8 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str="relu" , UpperCAmelCase__ : Any=2_5_6 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : int=1_0_2_4 , **UpperCAmelCase__ : Optional[Any] , ) -> Dict:
lowerCAmelCase = vocab_size
lowerCAmelCase = d_model
lowerCAmelCase = decoder_ffn_dim
lowerCAmelCase = decoder_layers
lowerCAmelCase = decoder_attention_heads
lowerCAmelCase = dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = activation_dropout
lowerCAmelCase = activation_function
lowerCAmelCase = init_std
lowerCAmelCase = decoder_layerdrop
lowerCAmelCase = use_cache
lowerCAmelCase = decoder_layers
lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase = max_target_positions
super().__init__(
pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
| 4 | 1 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__snake_case =random.Random()
def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : Dict=1.0 , lowerCamelCase : Dict=None , lowerCamelCase : str=None ):
if rng is None:
lowerCAmelCase = global_rng
lowerCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase_ ( unittest.TestCase ):
def __init__( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : str=4_0_0 , UpperCAmelCase__ : List[Any]=2_0_0_0 , UpperCAmelCase__ : Dict=1 , UpperCAmelCase__ : Dict=0.0 , UpperCAmelCase__ : List[Any]=1_6_0_0_0 , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Tuple=True , ) -> List[Any]:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = min_seq_length
lowerCAmelCase = max_seq_length
lowerCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase = feature_size
lowerCAmelCase = padding_value
lowerCAmelCase = sampling_rate
lowerCAmelCase = return_attention_mask
lowerCAmelCase = do_normalize
def __UpperCAmelCase ( self : Any ) -> List[Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : Optional[int]=False ) -> Dict:
def _flatten(UpperCAmelCase__ : Tuple ):
return list(itertools.chain(*UpperCAmelCase__ ) )
if equal_length:
lowerCAmelCase = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCAmelCase = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase = [np.asarray(UpperCAmelCase__ ) for x in speech_inputs]
return speech_inputs
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
lowerCamelCase : Tuple = WavaVecaFeatureExtractor
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
lowerCAmelCase = WavaVecaFeatureExtractionTester(self )
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
self.assertTrue(np.all(np.mean(UpperCAmelCase__ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(UpperCAmelCase__ , axis=0 ) - 1 ) < 1E-3 ) )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase = [np.asarray(UpperCAmelCase__ ) for speech_input in speech_inputs]
# Test not batched input
lowerCAmelCase = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test batched
lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
lowerCAmelCase = np.asarray(UpperCAmelCase__ )
lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase = ['longest', 'max_length', 'do_not_pad']
lowerCAmelCase = [None, 1_6_0_0, None]
for max_length, padding in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase = feat_extract(UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors='np' )
lowerCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def __UpperCAmelCase ( self : int ) -> str:
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = range(8_0_0 , 1_4_0_0 , 2_0_0 )
lowerCAmelCase = [floats_list((1, x) )[0] for x in lengths]
lowerCAmelCase = ['longest', 'max_length', 'do_not_pad']
lowerCAmelCase = [None, 1_6_0_0, None]
for max_length, padding in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase = feat_extract(UpperCAmelCase__ , max_length=UpperCAmelCase__ , padding=UpperCAmelCase__ )
lowerCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase = feat_extract(
UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=1_0_0_0 , padding='max_length' , return_tensors='np' )
lowerCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __UpperCAmelCase ( self : Any ) -> Tuple:
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase = feat_extract(
UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=1_0_0_0 , padding='longest' , return_tensors='np' )
lowerCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase = feat_extract(
UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=2_0_0_0 , padding='longest' , return_tensors='np' )
lowerCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
@require_torch
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
import torch
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = np.random.rand(1_0_0 ).astype(np.floataa )
lowerCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def __UpperCAmelCase ( self : Any ) -> Optional[int]:
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
lowerCAmelCase = WavaVecaConfig.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase = WavaVecaFeatureExtractor.from_pretrained(UpperCAmelCase__ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == 'layer' )
| 4 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class UpperCAmelCase_ ( __lowercase ):
def __lt__( self : Optional[int] , UpperCAmelCase__ : List[str] ) -> List[Any]:
return self[-1] < other[-1]
def __eq__( self : str , UpperCAmelCase__ : List[str] ) -> Tuple:
return self[-1] == other[-1]
def a_ ( lowerCamelCase : list ):
lowerCAmelCase = []
# sort into stacks
for element in collection:
lowerCAmelCase = Stack([element] )
lowerCAmelCase = bisect_left(lowerCamelCase , lowerCamelCase )
if i != len(lowerCamelCase ):
stacks[i].append(lowerCamelCase )
else:
stacks.append(lowerCamelCase )
# use a heap-based merge to merge stack efficiently
lowerCAmelCase = merge(*(reversed(lowerCamelCase ) for stack in stacks) )
return collection
if __name__ == "__main__":
__snake_case =input("""Enter numbers separated by a comma:\n""").strip()
__snake_case =[int(item) for item in user_input.split(""",""")]
print(patience_sort(unsorted))
| 4 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""microsoft/biogpt""": """https://huggingface.co/microsoft/biogpt/resolve/main/config.json""",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Optional[Any] = '''biogpt'''
def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[Any]=4_2_3_8_4 , UpperCAmelCase__ : List[str]=1_0_2_4 , UpperCAmelCase__ : Optional[Any]=2_4 , UpperCAmelCase__ : Optional[int]=1_6 , UpperCAmelCase__ : List[Any]=4_0_9_6 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Union[str, Any]=1_0_2_4 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : Union[str, Any]=1E-12 , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Dict=0.0 , UpperCAmelCase__ : Any=1 , UpperCAmelCase__ : str=0 , UpperCAmelCase__ : Dict=2 , **UpperCAmelCase__ : List[str] , ) -> int:
lowerCAmelCase = vocab_size
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = scale_embedding
lowerCAmelCase = use_cache
lowerCAmelCase = layerdrop
lowerCAmelCase = activation_dropout
super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
| 4 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__snake_case ="""\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__snake_case ="""\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__snake_case ="""
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
def __UpperCAmelCase ( self : Tuple ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : Optional[int]=False ) -> int:
lowerCAmelCase = compute_bleu(
reference_corpus=UpperCAmelCase__ , translation_corpus=UpperCAmelCase__ , max_order=UpperCAmelCase__ , smooth=UpperCAmelCase__ )
((lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase)) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 4 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
__snake_case =logging.get_logger(__name__)
__snake_case ={"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__snake_case =[
"""small""",
"""small-base""",
"""medium""",
"""medium-base""",
"""intermediate""",
"""intermediate-base""",
"""large""",
"""large-base""",
"""xlarge""",
"""xlarge-base""",
]
__snake_case ={
"""vocab_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json""",
"""funnel-transformer/small-base""": (
"""https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json""",
"""funnel-transformer/large-base""": (
"""https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"""
),
},
}
__snake_case ={F'''funnel-transformer/{name}''': 512 for name in _model_names}
__snake_case ={F'''funnel-transformer/{name}''': {"""do_lower_case""": True} for name in _model_names}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
lowerCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : str = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : Dict = FunnelTokenizer
lowerCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : int = 2
def __init__( self : Tuple , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Tuple="<unk>" , UpperCAmelCase__ : Union[str, Any]="<sep>" , UpperCAmelCase__ : List[Any]="<pad>" , UpperCAmelCase__ : Dict="<cls>" , UpperCAmelCase__ : int="<mask>" , UpperCAmelCase__ : Dict="<s>" , UpperCAmelCase__ : int="</s>" , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Tuple="##" , **UpperCAmelCase__ : Dict , ) -> Tuple:
super().__init__(
UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , clean_text=UpperCAmelCase__ , tokenize_chinese_chars=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ , wordpieces_prefix=UpperCAmelCase__ , **UpperCAmelCase__ , )
lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCAmelCase__ ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCAmelCase__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCAmelCase__ ) != tokenize_chinese_chars
):
lowerCAmelCase = getattr(UpperCAmelCase__ , normalizer_state.pop('type' ) )
lowerCAmelCase = do_lower_case
lowerCAmelCase = strip_accents
lowerCAmelCase = tokenize_chinese_chars
lowerCAmelCase = normalizer_class(**UpperCAmelCase__ )
lowerCAmelCase = do_lower_case
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any=None ) -> Optional[int]:
lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
lowerCAmelCase = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
| 4 |
'''simple docstring'''
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__snake_case ="""\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
__snake_case ="""\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
__snake_case ="""
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def a_ ( lowerCamelCase : str , lowerCamelCase : Union[str, Any] ):
return float((preds == labels).mean() )
def a_ ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict , lowerCamelCase : str="binary" ):
lowerCAmelCase = simple_accuracy(lowerCamelCase , lowerCamelCase )
lowerCAmelCase = float(fa_score(y_true=lowerCamelCase , y_pred=lowerCamelCase , average=lowerCamelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : List[Any] ):
lowerCAmelCase = {}
for id_pred, label in zip(lowerCamelCase , lowerCamelCase ):
lowerCAmelCase = f'''{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}'''
lowerCAmelCase = id_pred['prediction']
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
lowerCAmelCase = [(pred, label)]
lowerCAmelCase , lowerCAmelCase = [], []
for question, preds_labels in question_map.items():
lowerCAmelCase , lowerCAmelCase = zip(*lowerCamelCase )
lowerCAmelCase = fa_score(y_true=lowerCamelCase , y_pred=lowerCamelCase , average='macro' )
fas.append(lowerCamelCase )
lowerCAmelCase = int(sum(pred == label for pred, label in preds_labels ) == len(lowerCamelCase ) )
ems.append(lowerCamelCase )
lowerCAmelCase = float(sum(lowerCamelCase ) / len(lowerCamelCase ) )
lowerCAmelCase = sum(lowerCamelCase ) / len(lowerCamelCase )
lowerCAmelCase = float(fa_score(y_true=lowerCamelCase , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] ) -> Any:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(UpperCAmelCase__ , UpperCAmelCase__ )}
elif self.config_name == "cb":
return acc_and_fa(UpperCAmelCase__ , UpperCAmelCase__ , fa_avg='macro' )
elif self.config_name == "record":
lowerCAmelCase = [
{
'qas': [
{'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
for ref in references
]
}
]
lowerCAmelCase = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
return evaluate_record(UpperCAmelCase__ , UpperCAmelCase__ )[0]
elif self.config_name == "multirc":
return evaluate_multirc(UpperCAmelCase__ , UpperCAmelCase__ )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(UpperCAmelCase__ , UpperCAmelCase__ )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
| 4 | 1 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def a_ ( lowerCamelCase : str = "" , ):
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def a_ ( lowerCamelCase : str = "" ):
if len(lowerCamelCase ) == 0:
return True
lowerCAmelCase = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
lowerCAmelCase = {}
for character in lower_case_input_str:
lowerCAmelCase = character_freq_dict.get(lowerCamelCase , 0 ) + 1
lowerCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def a_ ( lowerCamelCase : str = "" ):
print('\nFor string = ' , lowerCamelCase , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(lowerCamelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(lowerCamelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
__snake_case =input(
"""Enter string to determine if it can be rearranged as a palindrome or not: """
).strip()
benchmark(check_str)
__snake_case =can_string_be_rearranged_as_palindrome_counter(check_str)
print(F'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
| 4 |
'''simple docstring'''
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
| 4 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__snake_case =logging.get_logger(__name__)
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : List[Any] , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Optional[Any] ) -> None:
warnings.warn(
'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ImageGPTImageProcessor instead.' , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 4 |
'''simple docstring'''
import os
__snake_case ={"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1_000}
def a_ ( lowerCamelCase : str ):
lowerCAmelCase = 0
lowerCAmelCase = 0
while index < len(lowerCamelCase ) - 1:
lowerCAmelCase = SYMBOLS[numerals[index]]
lowerCAmelCase = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def a_ ( lowerCamelCase : int ):
lowerCAmelCase = ''
lowerCAmelCase = num // 1000
numerals += m_count * "M"
num %= 1000
lowerCAmelCase = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
lowerCAmelCase = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def a_ ( lowerCamelCase : str = "/p089_roman.txt" ):
lowerCAmelCase = 0
with open(os.path.dirname(lowerCamelCase ) + roman_numerals_filename ) as filea:
lowerCAmelCase = filea.readlines()
for line in lines:
lowerCAmelCase = line.strip()
lowerCAmelCase = parse_roman_numerals(lowerCamelCase )
lowerCAmelCase = generate_roman_numerals(lowerCamelCase )
savings += len(lowerCamelCase ) - len(lowerCamelCase )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 4 | 1 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def a_ ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Optional[Any]=1024 , lowerCamelCase : Tuple=1024 , lowerCamelCase : Tuple=False , **lowerCamelCase : Any ):
lowerCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase )
lowerCAmelCase = SeqaSeqDataset(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , type_path='train' , **lowerCamelCase )
lowerCAmelCase = tok.pad_token_id
def get_lens(lowerCamelCase : List[Any] ):
lowerCAmelCase = tqdm(
DataLoader(lowerCamelCase , batch_size=512 , num_workers=8 , shuffle=lowerCamelCase , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
lowerCAmelCase = []
for batch in dl:
lowerCAmelCase = batch['input_ids'].ne(lowerCamelCase ).sum(1 ).tolist()
lowerCAmelCase = batch['labels'].ne(lowerCamelCase ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowerCamelCase , lowerCamelCase ):
max_lens.append(max(lowerCamelCase , lowerCamelCase ) )
else:
max_lens.extend(lowerCamelCase )
return max_lens
lowerCAmelCase = get_lens(lowerCamelCase )
lowerCAmelCase = SeqaSeqDataset(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , type_path='val' , **lowerCamelCase )
lowerCAmelCase = get_lens(lowerCamelCase )
pickle_save(lowerCamelCase , train_ds.len_file )
pickle_save(lowerCamelCase , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 4 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__snake_case =random.Random()
if is_torch_available():
import torch
def a_ ( lowerCamelCase : Dict , lowerCamelCase : Dict=1.0 , lowerCamelCase : List[Any]=None , lowerCamelCase : Union[str, Any]=None ):
if rng is None:
lowerCAmelCase = global_rng
lowerCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase_ ( unittest.TestCase ):
def __init__( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str]=7 , UpperCAmelCase__ : int=4_0_0 , UpperCAmelCase__ : int=2_0_0_0 , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple=1_6_0_0_0 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Union[str, Any]=True , ) -> Any:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = min_seq_length
lowerCAmelCase = max_seq_length
lowerCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase = feature_size
lowerCAmelCase = padding_value
lowerCAmelCase = sampling_rate
lowerCAmelCase = return_attention_mask
lowerCAmelCase = do_normalize
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Union[str, Any]=False ) -> Optional[Any]:
def _flatten(UpperCAmelCase__ : int ):
return list(itertools.chain(*UpperCAmelCase__ ) )
if equal_length:
lowerCAmelCase = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCAmelCase = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase = [np.asarray(UpperCAmelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
lowerCamelCase : Dict = ASTFeatureExtractor
def __UpperCAmelCase ( self : str ) -> Optional[int]:
lowerCAmelCase = ASTFeatureExtractionTester(self )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase = [np.asarray(UpperCAmelCase__ ) for speech_input in speech_inputs]
# Test not batched input
lowerCAmelCase = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test batched
lowerCAmelCase = feat_extract(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
lowerCAmelCase = np.asarray(UpperCAmelCase__ )
lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
import torch
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = np.random.rand(1_0_0 ).astype(np.floataa )
lowerCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : str ) -> Tuple:
from datasets import load_dataset
lowerCAmelCase = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
lowerCAmelCase = ds.sort('id' ).select(range(UpperCAmelCase__ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
@require_torch
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
# fmt: off
lowerCAmelCase = torch.tensor(
[-0.9_894, -1.2_776, -0.9_066, -1.2_776, -0.9_349, -1.2_609, -1.0_386, -1.2_776,
-1.1_561, -1.2_776, -1.2_052, -1.2_723, -1.2_190, -1.2_132, -1.2_776, -1.1_133,
-1.1_953, -1.1_343, -1.1_584, -1.2_203, -1.1_770, -1.2_474, -1.2_381, -1.1_936,
-0.9_270, -0.8_317, -0.8_049, -0.7_706, -0.7_565, -0.7_869] )
# fmt: on
lowerCAmelCase = self._load_datasamples(1 )
lowerCAmelCase = ASTFeatureExtractor()
lowerCAmelCase = feature_extractor(UpperCAmelCase__ , return_tensors='pt' ).input_values
self.assertEquals(input_values.shape , (1, 1_0_2_4, 1_2_8) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , UpperCAmelCase__ , atol=1E-4 ) )
| 4 | 1 |
'''simple docstring'''
import numpy as np
def a_ ( lowerCamelCase : np.ndarray , lowerCamelCase : float ):
return np.where(vector > 0 , lowerCamelCase , (alpha * (np.exp(lowerCamelCase ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : str ) -> List[Any]:
lowerCAmelCase = torch.nn.Linear(1_0 , 1_0 )
lowerCAmelCase = torch.optim.SGD(model.parameters() , 0.1 )
lowerCAmelCase = Accelerator()
lowerCAmelCase = accelerator.prepare(UpperCAmelCase__ )
try:
pickle.loads(pickle.dumps(UpperCAmelCase__ ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 4 | 1 |
'''simple docstring'''
def a_ ( lowerCamelCase : Optional[Any] ):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def a_ ( lowerCamelCase : dict[int, list[int]] ):
lowerCAmelCase = 0
lowerCAmelCase = len(lowerCamelCase ) # No of vertices in graph
lowerCAmelCase = [0] * n
lowerCAmelCase = [False] * n
def dfs(lowerCamelCase : Tuple , lowerCamelCase : str , lowerCamelCase : Dict , lowerCamelCase : str ):
lowerCAmelCase = True
lowerCAmelCase = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(lowerCamelCase , lowerCamelCase , lowerCamelCase , id_ )
lowerCAmelCase = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowerCAmelCase = min(low[at] , low[to] )
lowerCAmelCase = []
for i in range(lowerCamelCase ):
if not visited[i]:
dfs(lowerCamelCase , -1 , lowerCamelCase , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
__snake_case ={
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
__snake_case ={"""facebook/blenderbot-3B""": 128}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : List[Any] = VOCAB_FILES_NAMES
lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = ['''input_ids''', '''attention_mask''']
lowerCamelCase : List[Any] = BlenderbotTokenizer
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : str="replace" , UpperCAmelCase__ : Dict="<s>" , UpperCAmelCase__ : Tuple="</s>" , UpperCAmelCase__ : Optional[Any]="</s>" , UpperCAmelCase__ : Any="<s>" , UpperCAmelCase__ : List[str]="<unk>" , UpperCAmelCase__ : int="<pad>" , UpperCAmelCase__ : Union[str, Any]="<mask>" , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Union[str, Any]=True , **UpperCAmelCase__ : Optional[int] , ) -> int:
super().__init__(
UpperCAmelCase__ , UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , errors=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ , **UpperCAmelCase__ , )
lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , UpperCAmelCase__ ) != add_prefix_space:
lowerCAmelCase = getattr(UpperCAmelCase__ , pre_tok_state.pop('type' ) )
lowerCAmelCase = add_prefix_space
lowerCAmelCase = pre_tok_class(**UpperCAmelCase__ )
lowerCAmelCase = add_prefix_space
lowerCAmelCase = 'post_processor'
lowerCAmelCase = getattr(self.backend_tokenizer , UpperCAmelCase__ , UpperCAmelCase__ )
if tokenizer_component_instance:
lowerCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase = tuple(state['sep'] )
if "cls" in state:
lowerCAmelCase = tuple(state['cls'] )
lowerCAmelCase = False
if state.get('add_prefix_space' , UpperCAmelCase__ ) != add_prefix_space:
lowerCAmelCase = add_prefix_space
lowerCAmelCase = True
if state.get('trim_offsets' , UpperCAmelCase__ ) != trim_offsets:
lowerCAmelCase = trim_offsets
lowerCAmelCase = True
if changes_to_apply:
lowerCAmelCase = getattr(UpperCAmelCase__ , state.pop('type' ) )
lowerCAmelCase = component_class(**UpperCAmelCase__ )
setattr(self.backend_tokenizer , UpperCAmelCase__ , UpperCAmelCase__ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : Optional[Any] ) -> Tuple:
lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else value
lowerCAmelCase = value
def __UpperCAmelCase ( self : Optional[Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : List[str] ) -> BatchEncoding:
lowerCAmelCase = kwargs.get('is_split_into_words' , UpperCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[str] , *UpperCAmelCase__ : str , **UpperCAmelCase__ : List[str] ) -> BatchEncoding:
lowerCAmelCase = kwargs.get('is_split_into_words' , UpperCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
lowerCAmelCase = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> Any:
return token_ids_a + [self.eos_token_id]
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : "Conversation" ) -> List[int]:
lowerCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(UpperCAmelCase__ )
lowerCAmelCase = ' '.join(UpperCAmelCase__ )
lowerCAmelCase = self.encode(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > self.model_max_length:
lowerCAmelCase = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 4 | 1 |
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def a_ ( lowerCamelCase : Any=None , lowerCamelCase : int=None ):
return field(default_factory=lambda: default , metadata=lowerCamelCase )
@dataclass
class UpperCAmelCase_ :
lowerCamelCase : str = field(
metadata={'''help''': '''The csv file to plot.'''} , )
lowerCamelCase : bool = field(
default=__lowercase , metadata={'''help''': '''Whether to plot along batch size or sequence length. Defaults to sequence length.'''} , )
lowerCamelCase : bool = field(
default=__lowercase , metadata={'''help''': '''Whether the csv file has time results or memory results. Defaults to memory results.'''} , )
lowerCamelCase : bool = field(
default=__lowercase , metadata={'''help''': '''Disable logarithmic scale when plotting'''} , )
lowerCamelCase : bool = field(
default=__lowercase , metadata={
'''help''': '''Whether the csv file has training results or inference results. Defaults to inference results.'''
} , )
lowerCamelCase : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Filename under which the plot will be saved. If unused no plot is saved.'''} , )
lowerCamelCase : Optional[List[str]] = list_field(
default=__lowercase , metadata={'''help''': '''List of model names that are used instead of the ones in the csv file.'''} )
def a_ ( lowerCamelCase : Union[str, Any] ):
try:
int(lowerCamelCase )
return True
except ValueError:
return False
def a_ ( lowerCamelCase : List[Any] ):
try:
float(lowerCamelCase )
return True
except ValueError:
return False
class UpperCAmelCase_ :
def __init__( self : int , UpperCAmelCase__ : Union[str, Any] ) -> List[str]:
lowerCAmelCase = args
lowerCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='' ) as csv_file:
lowerCAmelCase = csv.DictReader(UpperCAmelCase__ )
for row in reader:
lowerCAmelCase = row['model']
self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) )
self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) )
if can_convert_to_int(row['result'] ):
# value is not None
lowerCAmelCase = int(row['result'] )
elif can_convert_to_float(row['result'] ):
# value is not None
lowerCAmelCase = float(row['result'] )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
lowerCAmelCase , lowerCAmelCase = plt.subplots()
lowerCAmelCase = 'Time usage' if self.args.is_time else 'Memory usage'
lowerCAmelCase = title_str + ' for training' if self.args.is_train else title_str + ' for inference'
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('log' )
ax.set_yscale('log' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
lowerCAmelCase = sorted(set(self.result_dict[model_name]['bsz'] ) )
lowerCAmelCase = sorted(set(self.result_dict[model_name]['seq_len'] ) )
lowerCAmelCase = self.result_dict[model_name]['result']
((lowerCAmelCase) , (lowerCAmelCase)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
lowerCAmelCase = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
lowerCAmelCase = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=UpperCAmelCase__ , )
else:
lowerCAmelCase = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((lowerCAmelCase) , (lowerCAmelCase)) = (
('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
)
lowerCAmelCase = np.asarray(UpperCAmelCase__ , UpperCAmelCase__ )[: len(UpperCAmelCase__ )]
plt.scatter(
UpperCAmelCase__ , UpperCAmelCase__ , label=F'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(UpperCAmelCase__ , UpperCAmelCase__ , '--' )
title_str += F''' {label_model_name} vs.'''
lowerCAmelCase = title_str[:-4]
lowerCAmelCase = 'Time in s' if self.args.is_time else 'Memory in MB'
# plot
plt.title(UpperCAmelCase__ )
plt.xlabel(UpperCAmelCase__ )
plt.ylabel(UpperCAmelCase__ )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def a_ ( ):
lowerCAmelCase = HfArgumentParser(lowerCamelCase )
lowerCAmelCase = parser.parse_args_into_dataclasses()[0]
lowerCAmelCase = Plot(args=lowerCamelCase )
plot.plot()
if __name__ == "__main__":
main()
| 4 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def a_ ( lowerCamelCase : list[int] , lowerCamelCase : list[int] , lowerCamelCase : int ):
lowerCAmelCase = [0] * no_of_processes
lowerCAmelCase = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(lowerCamelCase ):
lowerCAmelCase = burst_time[i]
lowerCAmelCase = []
lowerCAmelCase = 0
lowerCAmelCase = 0
# When processes are not completed,
# A process whose arrival time has passed \
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
lowerCAmelCase = []
lowerCAmelCase = -1
for i in range(lowerCamelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(lowerCamelCase )
if len(lowerCamelCase ) > 0:
lowerCAmelCase = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
lowerCAmelCase = i
total_time += burst_time[target_process]
completed += 1
lowerCAmelCase = 0
lowerCAmelCase = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def a_ ( lowerCamelCase : list[int] , lowerCamelCase : int , lowerCamelCase : list[int] ):
lowerCAmelCase = [0] * no_of_processes
for i in range(lowerCamelCase ):
lowerCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
__snake_case =4
__snake_case =[2, 5, 3, 7]
__snake_case =[0, 0, 0, 0]
__snake_case =calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__snake_case =calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
F'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(F'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(F'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 4 | 1 |
'''simple docstring'''
def a_ ( lowerCamelCase : list[int] , lowerCamelCase : list[int] , lowerCamelCase : int ):
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(lowerCamelCase ) )
def a_ ( lowerCamelCase : list[list[int]] , lowerCamelCase : int , lowerCamelCase : list[int] , lowerCamelCase : int ):
# Base Case
if index == len(lowerCamelCase ):
return True
# Recursive Step
for i in range(lowerCamelCase ):
if valid_coloring(graph[index] , lowerCamelCase , lowerCamelCase ):
# Color current vertex
lowerCAmelCase = i
# Validate coloring
if util_color(lowerCamelCase , lowerCamelCase , lowerCamelCase , index + 1 ):
return True
# Backtrack
lowerCAmelCase = -1
return False
def a_ ( lowerCamelCase : list[list[int]] , lowerCamelCase : int ):
lowerCAmelCase = [-1] * len(lowerCamelCase )
if util_color(lowerCamelCase , lowerCamelCase , lowerCamelCase , 0 ):
return colored_vertices
return []
| 4 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : Optional[int] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Tuple ) -> Any:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_euler' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self : List[str] ) -> Dict:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_euler' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_dpmpp_2m' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=1_5 , output_type='np' , use_karras_sigmas=UpperCAmelCase__ , )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array(
[0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 4 | 1 |
'''simple docstring'''
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class UpperCAmelCase_ ( nn.Module ):
def __init__( self : Dict ) -> List[str]:
super().__init__()
lowerCAmelCase = nn.Linear(3 , 4 )
lowerCAmelCase = nn.BatchNormad(4 )
lowerCAmelCase = nn.Linear(4 , 5 )
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : Tuple ) -> Any:
return self.lineara(self.batchnorm(self.lineara(UpperCAmelCase__ ) ) )
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : Any ) -> Any:
lowerCAmelCase = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCAmelCase__ , model.state_dict() )
lowerCAmelCase = os.path.join(UpperCAmelCase__ , 'index.json' )
self.assertTrue(os.path.isfile(UpperCAmelCase__ ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
lowerCAmelCase = os.path.join(UpperCAmelCase__ , F'''{key}.dat''' )
self.assertTrue(os.path.isfile(UpperCAmelCase__ ) )
# TODO: add tests on the fact weights are properly loaded
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
lowerCAmelCase = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
lowerCAmelCase = torch.randn(2 , 3 , dtype=UpperCAmelCase__ )
with TemporaryDirectory() as tmp_dir:
lowerCAmelCase = offload_weight(UpperCAmelCase__ , 'weight' , UpperCAmelCase__ , {} )
lowerCAmelCase = os.path.join(UpperCAmelCase__ , 'weight.dat' )
self.assertTrue(os.path.isfile(UpperCAmelCase__ ) )
self.assertDictEqual(UpperCAmelCase__ , {'weight': {'shape': [2, 3], 'dtype': str(UpperCAmelCase__ ).split('.' )[1]}} )
lowerCAmelCase = load_offloaded_weight(UpperCAmelCase__ , index['weight'] )
self.assertTrue(torch.equal(UpperCAmelCase__ , UpperCAmelCase__ ) )
def __UpperCAmelCase ( self : Tuple ) -> Any:
lowerCAmelCase = ModelForTest()
lowerCAmelCase = model.state_dict()
lowerCAmelCase = {k: v for k, v in state_dict.items() if 'linear2' not in k}
lowerCAmelCase = {k: v for k, v in state_dict.items() if 'linear2' in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = OffloadedWeightsLoader(state_dict=UpperCAmelCase__ , save_folder=UpperCAmelCase__ )
# Every key is there with the right value
self.assertEqual(sorted(UpperCAmelCase__ ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCAmelCase__ , weight_map[key] ) )
lowerCAmelCase = {k: v for k, v in state_dict.items() if 'weight' in k}
lowerCAmelCase = {k: v for k, v in state_dict.items() if 'weight' not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = OffloadedWeightsLoader(state_dict=UpperCAmelCase__ , save_folder=UpperCAmelCase__ )
# Every key is there with the right value
self.assertEqual(sorted(UpperCAmelCase__ ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCAmelCase__ , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCAmelCase__ , UpperCAmelCase__ )
# Duplicates are removed
lowerCAmelCase = OffloadedWeightsLoader(state_dict=UpperCAmelCase__ , save_folder=UpperCAmelCase__ )
# Every key is there with the right value
self.assertEqual(sorted(UpperCAmelCase__ ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCAmelCase__ , weight_map[key] ) )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
lowerCAmelCase = {'a.1': 0, 'a.10': 1, 'a.2': 2}
lowerCAmelCase = extract_submodules_state_dict(UpperCAmelCase__ , ['a.1', 'a.2'] )
self.assertDictEqual(UpperCAmelCase__ , {'a.1': 0, 'a.2': 2} )
lowerCAmelCase = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2}
lowerCAmelCase = extract_submodules_state_dict(UpperCAmelCase__ , ['a.1', 'a.2'] )
self.assertDictEqual(UpperCAmelCase__ , {'a.1.a': 0, 'a.2.a': 2} )
| 4 |
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def a_ ( lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any]=0 ):
# Format the message.
if name is None:
lowerCAmelCase = None
else:
lowerCAmelCase = '.' * max(0 , spaces - 2 ) + '# {:' + str(50 - spaces ) + 's}'
lowerCAmelCase = fmt.format(lowerCamelCase )
# Print and recurse (if needed).
if isinstance(lowerCamelCase , lowerCamelCase ):
if msg is not None:
print(lowerCamelCase )
for k in val.keys():
recursive_print(lowerCamelCase , val[k] , spaces + 2 )
elif isinstance(lowerCamelCase , torch.Tensor ):
print(lowerCamelCase , ':' , val.size() )
else:
print(lowerCamelCase , ':' , lowerCamelCase )
def a_ ( lowerCamelCase : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Dict , lowerCamelCase : Tuple ):
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
lowerCAmelCase = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
lowerCAmelCase = (num_heads, hidden_size, num_splits) + input_shape[1:]
lowerCAmelCase = param.view(*lowerCamelCase )
lowerCAmelCase = param.transpose(0 , 2 )
lowerCAmelCase = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
lowerCAmelCase = (num_heads, num_splits, hidden_size) + input_shape[1:]
lowerCAmelCase = param.view(*lowerCamelCase )
lowerCAmelCase = param.transpose(0 , 1 ).contiguous()
lowerCAmelCase = param.view(*lowerCamelCase )
return param
def a_ ( lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : str ):
# The converted output model.
lowerCAmelCase = {}
# old versions did not store training args
lowerCAmelCase = input_state_dict.get('args' , lowerCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
lowerCAmelCase = ds_args.padded_vocab_size
lowerCAmelCase = ds_args.max_position_embeddings
lowerCAmelCase = ds_args.hidden_size
lowerCAmelCase = ds_args.num_layers
lowerCAmelCase = ds_args.num_attention_heads
lowerCAmelCase = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
lowerCAmelCase = config.n_head
# The hidden_size per head.
lowerCAmelCase = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
lowerCAmelCase = input_state_dict['checkpoint_version']
else:
lowerCAmelCase = 0.0
# The model.
lowerCAmelCase = input_state_dict['model']
# The language model.
lowerCAmelCase = model['language_model']
# The embeddings.
lowerCAmelCase = lm['embedding']
# The word embeddings.
lowerCAmelCase = embeddings['word_embeddings']['weight']
# Truncate the embedding table to vocab_size rows.
lowerCAmelCase = word_embeddings[: config.vocab_size, :]
lowerCAmelCase = word_embeddings
# The position embeddings.
lowerCAmelCase = embeddings['position_embeddings']['weight']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
lowerCAmelCase = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
lowerCAmelCase = pos_embeddings
# The transformer.
lowerCAmelCase = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder']
# The regex to extract layer names.
lowerCAmelCase = re.compile(R'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' )
# The simple map of names for "automated" rules.
lowerCAmelCase = {
'attention.dense': '.attn.c_proj.',
'self_attention.dense': '.attn.c_proj.',
'mlp.dense_h_to_4h': '.mlp.c_fc.',
'mlp.dense_4h_to_h': '.mlp.c_proj.',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
lowerCAmelCase = layer_re.match(lowerCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
lowerCAmelCase = int(m.group(1 ) )
# The name of the operation.
lowerCAmelCase = m.group(2 )
# Is it a weight or a bias?
lowerCAmelCase = m.group(3 )
# The name of the layer.
lowerCAmelCase = f'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith('layernorm' ):
lowerCAmelCase = 'ln_1' if op_name.startswith('input' ) else 'ln_2'
lowerCAmelCase = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
lowerCAmelCase = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , lowerCamelCase , lowerCamelCase )
lowerCAmelCase = causal_mask
# Insert a "dummy" tensor for masked_bias.
lowerCAmelCase = torch.tensor(-1e4 , dtype=torch.floataa )
lowerCAmelCase = masked_bias
lowerCAmelCase = fix_query_key_value_ordering(lowerCamelCase , lowerCamelCase , 3 , lowerCamelCase , lowerCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
lowerCAmelCase = out_val.transpose(0 , 1 ).contiguous()
# Store.
lowerCAmelCase = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
lowerCAmelCase = fix_query_key_value_ordering(lowerCamelCase , lowerCamelCase , 3 , lowerCamelCase , lowerCamelCase )
# Store. No change of shape.
lowerCAmelCase = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
lowerCAmelCase = megatron_to_transformers[op_name]
lowerCAmelCase = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
lowerCAmelCase = megatron_to_transformers[op_name]
lowerCAmelCase = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
lowerCAmelCase = transformer['final_layernorm.weight']
lowerCAmelCase = transformer['final_layernorm.bias']
# For LM head, transformers' wants the matrix to weight embeddings.
lowerCAmelCase = word_embeddings
# It should be done!
return output_state_dict
def a_ ( ):
# Create the argument parser.
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('--print-checkpoint-structure' , action='store_true' )
parser.add_argument(
'path_to_checkpoint' , type=lowerCamelCase , help='Path to the checkpoint file (.zip archive or direct .pt file)' , )
parser.add_argument(
'--config_file' , default='' , type=lowerCamelCase , help='An optional config json file describing the pre-trained model.' , )
lowerCAmelCase = parser.parse_args()
# Extract the basename.
lowerCAmelCase = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith('.zip' ):
with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint:
with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict:
lowerCAmelCase = torch.load(lowerCamelCase , map_location='cpu' )
else:
lowerCAmelCase = torch.load(args.path_to_checkpoint , map_location='cpu' )
lowerCAmelCase = input_state_dict.get('args' , lowerCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
lowerCAmelCase = 'gelu_fast'
elif ds_args.openai_gelu:
lowerCAmelCase = 'gelu_new'
else:
lowerCAmelCase = 'gelu'
else:
# in the very early days this used to be "gelu_new"
lowerCAmelCase = 'gelu_new'
# Spell out all parameters in case the defaults change.
lowerCAmelCase = GPTaConfig(
vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=lowerCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='cls_index' , summary_use_proj=lowerCamelCase , summary_activation=lowerCamelCase , summary_proj_to_labels=lowerCamelCase , summary_first_dropout=0.1 , scale_attn_weights=lowerCamelCase , use_cache=lowerCamelCase , bos_token_id=50256 , eos_token_id=50256 , )
else:
lowerCAmelCase = GPTaConfig.from_json_file(args.config_file )
lowerCAmelCase = ['GPT2LMHeadModel']
# Convert.
print('Converting' )
lowerCAmelCase = convert_megatron_checkpoint(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(lowerCamelCase , lowerCamelCase )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
lowerCAmelCase = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
lowerCAmelCase = 'gpt2'
elif tokenizer_type == "PretrainedFromHF":
lowerCAmelCase = ds_args.tokenizer_name_or_path
else:
raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
lowerCAmelCase = 'gpt2'
lowerCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase )
lowerCAmelCase = type(lowerCamelCase ).__name__
lowerCAmelCase = tokenizer_class
# Store the config to file.
print('Saving config' )
config.save_pretrained(lowerCamelCase )
# Save tokenizer based on args
print(f'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(lowerCamelCase )
# Store the state_dict to file.
lowerCAmelCase = os.path.join(lowerCamelCase , 'pytorch_model.bin' )
print(f'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(lowerCamelCase , lowerCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 4 | 1 |
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case =[
"""word_embeddings_layernorm.weight""",
"""word_embeddings_layernorm.bias""",
"""input_layernorm.weight""",
"""input_layernorm.bias""",
"""post_attention_layernorm.weight""",
"""post_attention_layernorm.bias""",
"""self_attention.dense.bias""",
"""mlp.dense_4h_to_h.bias""",
"""ln_f.weight""",
"""ln_f.bias""",
]
__snake_case =[
"""mlp.dense_4h_to_h.weight""",
"""self_attention.dense.weight""",
]
def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : Tuple ):
lowerCAmelCase = {
'word_embeddings.weight': 'word_embeddings.weight',
'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
'weight': 'ln_f.weight',
'bias': 'ln_f.bias',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
lowerCAmelCase = int(re.match(R'.*layer_(\d*).*' , lowerCamelCase )[1] )
layer_number -= 3
return f'''h.{layer_number}.''' + key
def a_ ( lowerCamelCase : List[Any] ):
if dtype == torch.bool:
return 1 / 8
lowerCAmelCase = re.search(R'[^\d](\d+)$' , str(lowerCamelCase ) )
if bit_search is None:
raise ValueError(f'''`dtype` is not a valid dtype: {dtype}.''' )
lowerCAmelCase = int(bit_search.groups()[0] )
return bit_size // 8
def a_ ( lowerCamelCase : int , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : Union[str, Any] ):
# Construct model
if bloom_config_file == "":
lowerCAmelCase = BloomConfig()
else:
lowerCAmelCase = BloomConfig.from_json_file(lowerCamelCase )
if shard_model:
lowerCAmelCase = os.listdir(lowerCamelCase )
lowerCAmelCase = sorted(filter(lambda lowerCamelCase : s.startswith('layer' ) and "model_00" in s , lowerCamelCase ) )
lowerCAmelCase = {'weight_map': {}, 'metadata': {}}
lowerCAmelCase = 0
lowerCAmelCase = None
lowerCAmelCase = BloomConfig()
for j, file in enumerate(lowerCamelCase ):
print('Processing file: {}'.format(lowerCamelCase ) )
lowerCAmelCase = None
for i in range(lowerCamelCase ):
# load all TP files
lowerCAmelCase = file.replace('model_00' , f'''model_0{i}''' )
lowerCAmelCase = torch.load(os.path.join(lowerCamelCase , lowerCamelCase ) , map_location='cpu' )
# Rename keys in the transformers names
lowerCAmelCase = list(temp.keys() )
for key in keys:
lowerCAmelCase = temp.pop(lowerCamelCase )
if tensors is None:
lowerCAmelCase = temp
else:
for key in tensors.keys():
if any(key.endswith(lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowerCAmelCase = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
lowerCAmelCase = torch.cat([tensors[key], temp[key]] , dim=lowerCamelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowerCAmelCase = tensors[key] / pretraining_tp
torch.save(
lowerCamelCase , os.path.join(
lowerCamelCase , 'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ) , str(len(lowerCamelCase ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
lowerCAmelCase = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
lowerCAmelCase = 'pytorch_model_{}-of-{}.bin'.format(
str(j + 1 ).zfill(5 ) , str(len(lowerCamelCase ) ).zfill(5 ) )
lowerCAmelCase = BloomConfig()
lowerCAmelCase = pytorch_dump_folder_path + '/' + CONFIG_NAME
lowerCAmelCase = total_size
with open(lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(lowerCamelCase , WEIGHTS_NAME + '.index.json' ) , 'w' , encoding='utf-8' ) as f:
lowerCAmelCase = json.dumps(lowerCamelCase , indent=2 , sort_keys=lowerCamelCase ) + '\n'
f.write(lowerCamelCase )
else:
lowerCAmelCase = BloomModel(lowerCamelCase )
lowerCAmelCase = os.listdir(lowerCamelCase )
lowerCAmelCase = sorted(filter(lambda lowerCamelCase : s.startswith('layer' ) and "model_00" in s , lowerCamelCase ) )
lowerCAmelCase = None
for i, file in enumerate(lowerCamelCase ):
lowerCAmelCase = None
for i in range(lowerCamelCase ):
# load all TP files
lowerCAmelCase = file.replace('model_00' , f'''model_0{i}''' )
lowerCAmelCase = torch.load(os.path.join(lowerCamelCase , lowerCamelCase ) , map_location='cpu' )
# Rename keys in the transformers names
lowerCAmelCase = list(temp.keys() )
for key in keys:
lowerCAmelCase = temp.pop(lowerCamelCase )
if tensors is None:
lowerCAmelCase = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowerCAmelCase = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
lowerCAmelCase = torch.cat([tensors[key], temp[key]] , dim=lowerCamelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(lowerCamelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowerCAmelCase = tensors[key] / pretraining_tp
lowerCAmelCase = model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
assert not other_keys.unexpected_keys, f'''The keys {other_keys.unexpected_keys} are unexpected'''
if missing_keys is None:
lowerCAmelCase = set(other_keys.missing_keys )
else:
lowerCAmelCase = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f'''The keys {missing_keys} are missing'''
# Save pytorch-model
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
lowerCAmelCase = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
lowerCAmelCase = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(f'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
if config.torch_dtype is not None:
lowerCAmelCase = model.to(config.torch_dtype )
torch.save(model.state_dict() , lowerCamelCase )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bloom_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path to the Megatron-LM checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--bloom_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--shard_model""",
action="""store_true""",
help="""An optional setting to shard the output model \nThis enables sharding the converted checkpoint""",
)
parser.add_argument(
"""--pretraining_tp""",
default=4,
type=int,
help="""Pretraining TP rank that has been used when training the model in Megatron-LM \n""",
)
__snake_case =parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 4 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class UpperCAmelCase_ :
def __init__( self : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 0 ) -> None:
lowerCAmelCase , lowerCAmelCase = row, column
lowerCAmelCase = [[default_value for c in range(UpperCAmelCase__ )] for r in range(UpperCAmelCase__ )]
def __str__( self : List[str] ) -> str:
lowerCAmelCase = F'''Matrix consist of {self.row} rows and {self.column} columns\n'''
# Make string identifier
lowerCAmelCase = 0
for row_vector in self.array:
for obj in row_vector:
lowerCAmelCase = max(UpperCAmelCase__ , len(str(UpperCAmelCase__ ) ) )
lowerCAmelCase = F'''%{max_element_length}s'''
# Make string and return
def single_line(UpperCAmelCase__ : list[float] ) -> str:
nonlocal string_format_identifier
lowerCAmelCase = '['
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(UpperCAmelCase__ ) for row_vector in self.array )
return s
def __repr__( self : List[str] ) -> str:
return str(self )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : tuple[int, int] ) -> bool:
if not (isinstance(UpperCAmelCase__ , (list, tuple) ) and len(UpperCAmelCase__ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Any , UpperCAmelCase__ : tuple[int, int] ) -> Any:
assert self.validate_indicies(UpperCAmelCase__ )
return self.array[loc[0]][loc[1]]
def __setitem__( self : Dict , UpperCAmelCase__ : tuple[int, int] , UpperCAmelCase__ : float ) -> None:
assert self.validate_indicies(UpperCAmelCase__ )
lowerCAmelCase = value
def __add__( self : Any , UpperCAmelCase__ : Matrix ) -> Matrix:
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert self.row == another.row and self.column == another.column
# Add
lowerCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = self[r, c] + another[r, c]
return result
def __neg__( self : int ) -> Matrix:
lowerCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = -self[r, c]
return result
def __sub__( self : str , UpperCAmelCase__ : Matrix ) -> Matrix:
return self + (-another)
def __mul__( self : str , UpperCAmelCase__ : int | float | Matrix ) -> Matrix:
if isinstance(UpperCAmelCase__ , (int, float) ): # Scalar multiplication
lowerCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = self[r, c] * another
return result
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): # Matrix multiplication
assert self.column == another.row
lowerCAmelCase = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
lowerCAmelCase = F'''Unsupported type given for another ({type(UpperCAmelCase__ )})'''
raise TypeError(UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[Any] ) -> Matrix:
lowerCAmelCase = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = self[r, c]
return result
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : Matrix , UpperCAmelCase__ : Matrix ) -> Any:
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
lowerCAmelCase = v.transpose()
lowerCAmelCase = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def a_ ( ):
# a^(-1)
lowerCAmelCase = Matrix(3 , 3 , 0 )
for i in range(3 ):
lowerCAmelCase = 1
print(f'''a^(-1) is {ainv}''' )
# u, v
lowerCAmelCase = Matrix(3 , 1 , 0 )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1, 2, -3
lowerCAmelCase = Matrix(3 , 1 , 0 )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 4, -2, 5
print(f'''u is {u}''' )
print(f'''v is {v}''' )
print(f'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(lowerCamelCase , lowerCamelCase )}''' )
def a_ ( ):
import doctest
doctest.testmod()
testa()
| 4 | 1 |
'''simple docstring'''
from maths.prime_factors import prime_factors
def a_ ( lowerCamelCase : int ):
if not isinstance(lowerCamelCase , lowerCamelCase ):
lowerCAmelCase = f'''Input value of [number={number}] must be an integer'''
raise TypeError(lowerCamelCase )
if number < 1:
raise ValueError('Input must be a positive integer' )
return -1 if len(prime_factors(lowerCamelCase ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 |
'''simple docstring'''
class UpperCAmelCase_ :
def __init__( self : List[str] , UpperCAmelCase__ : list[int] ) -> None:
lowerCAmelCase = len(UpperCAmelCase__ )
lowerCAmelCase = [0] * len_array
if len_array > 0:
lowerCAmelCase = array[0]
for i in range(1 , UpperCAmelCase__ ):
lowerCAmelCase = self.prefix_sum[i - 1] + array[i]
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : int ) -> bool:
lowerCAmelCase = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(UpperCAmelCase__ )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 1 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ="""▁"""
__snake_case ={
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
}
__snake_case ={
"""vocab_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"""
),
},
"""spm_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"""
)
},
}
__snake_case ={
"""facebook/s2t-small-librispeech-asr""": 1_024,
}
__snake_case =["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""]
__snake_case ={"""mustc""": MUSTC_LANGS}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : int = VOCAB_FILES_NAMES
lowerCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Union[str, Any] = MAX_MODEL_INPUT_SIZES
lowerCamelCase : List[str] = ['''input_ids''', '''attention_mask''']
lowerCamelCase : List[int] = []
def __init__( self : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str]="<s>" , UpperCAmelCase__ : str="</s>" , UpperCAmelCase__ : str="<pad>" , UpperCAmelCase__ : Tuple="<unk>" , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : str , ) -> None:
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , do_upper_case=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , tgt_lang=UpperCAmelCase__ , lang_codes=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
lowerCAmelCase = do_upper_case
lowerCAmelCase = do_lower_case
lowerCAmelCase = load_json(UpperCAmelCase__ )
lowerCAmelCase = {v: k for k, v in self.encoder.items()}
lowerCAmelCase = spm_file
lowerCAmelCase = load_spm(UpperCAmelCase__ , self.sp_model_kwargs )
if lang_codes is not None:
lowerCAmelCase = lang_codes
lowerCAmelCase = LANGUAGES[lang_codes]
lowerCAmelCase = [F'''<lang:{lang}>''' for lang in self.langs]
lowerCAmelCase = {lang: self.sp_model.PieceToId(F'''<lang:{lang}>''' ) for lang in self.langs}
lowerCAmelCase = self.lang_tokens
lowerCAmelCase = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
lowerCAmelCase = {}
@property
def __UpperCAmelCase ( self : Optional[int] ) -> int:
return len(self.encoder )
@property
def __UpperCAmelCase ( self : Tuple ) -> str:
return self._tgt_lang
@tgt_lang.setter
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : Tuple ) -> None:
lowerCAmelCase = new_tgt_lang
self.set_tgt_lang_special_tokens(UpperCAmelCase__ )
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : str ) -> None:
lowerCAmelCase = self.lang_code_to_id[tgt_lang]
lowerCAmelCase = [lang_code_id]
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : str ) -> List[str]:
return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : Dict ) -> Optional[int]:
return self.encoder.get(UpperCAmelCase__ , self.encoder[self.unk_token] )
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : int ) -> str:
return self.decoder.get(UpperCAmelCase__ , self.unk_token )
def __UpperCAmelCase ( self : Tuple , UpperCAmelCase__ : List[str] ) -> str:
lowerCAmelCase = []
lowerCAmelCase = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
lowerCAmelCase = self.sp_model.decode(UpperCAmelCase__ )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
lowerCAmelCase = []
else:
current_sub_tokens.append(UpperCAmelCase__ )
lowerCAmelCase = self.sp_model.decode(UpperCAmelCase__ )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict=None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
lowerCAmelCase = [1] * len(self.prefix_tokens )
lowerCAmelCase = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(UpperCAmelCase__ )) + suffix_ones
return prefix_ones + ([0] * len(UpperCAmelCase__ )) + ([0] * len(UpperCAmelCase__ )) + suffix_ones
def __UpperCAmelCase ( self : str ) -> Dict:
lowerCAmelCase = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : int ) -> Dict:
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self : Optional[Any] , UpperCAmelCase__ : Dict ) -> None:
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase = {}
lowerCAmelCase = load_spm(self.spm_file , self.sp_model_kwargs )
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
lowerCAmelCase = Path(UpperCAmelCase__ )
assert save_dir.is_dir(), F'''{save_directory} should be a directory'''
lowerCAmelCase = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
lowerCAmelCase = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , UpperCAmelCase__ )
if os.path.abspath(self.spm_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , UpperCAmelCase__ )
elif not os.path.isfile(self.spm_file ):
with open(UpperCAmelCase__ , 'wb' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (str(UpperCAmelCase__ ), str(UpperCAmelCase__ ))
def a_ ( lowerCamelCase : str , lowerCamelCase : Dict[str, Any] ):
lowerCAmelCase = sentencepiece.SentencePieceProcessor(**lowerCamelCase )
spm.Load(str(lowerCamelCase ) )
return spm
def a_ ( lowerCamelCase : str ):
with open(lowerCamelCase , 'r' ) as f:
return json.load(lowerCamelCase )
def a_ ( lowerCamelCase : Tuple , lowerCamelCase : str ):
with open(lowerCamelCase , 'w' ) as f:
json.dump(lowerCamelCase , lowerCamelCase , indent=2 )
| 4 |
'''simple docstring'''
def a_ ( lowerCamelCase : Optional[Any] ):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def a_ ( lowerCamelCase : dict[int, list[int]] ):
lowerCAmelCase = 0
lowerCAmelCase = len(lowerCamelCase ) # No of vertices in graph
lowerCAmelCase = [0] * n
lowerCAmelCase = [False] * n
def dfs(lowerCamelCase : Tuple , lowerCamelCase : str , lowerCamelCase : Dict , lowerCamelCase : str ):
lowerCAmelCase = True
lowerCAmelCase = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(lowerCamelCase , lowerCamelCase , lowerCamelCase , id_ )
lowerCAmelCase = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowerCAmelCase = min(low[at] , low[to] )
lowerCAmelCase = []
for i in range(lowerCamelCase ):
if not visited[i]:
dfs(lowerCamelCase , -1 , lowerCamelCase , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
__snake_case =8
def a_ ( lowerCamelCase : Optional[int] , lowerCamelCase : List[Any]=BITS ):
lowerCAmelCase = x.device
lowerCAmelCase = (x * 255).int().clamp(0 , 255 )
lowerCAmelCase = 2 ** torch.arange(bits - 1 , -1 , -1 , device=lowerCamelCase )
lowerCAmelCase = rearrange(lowerCamelCase , 'd -> d 1 1' )
lowerCAmelCase = rearrange(lowerCamelCase , 'b c h w -> b c 1 h w' )
lowerCAmelCase = ((x & mask) != 0).float()
lowerCAmelCase = rearrange(lowerCamelCase , 'b c d h w -> b (c d) h w' )
lowerCAmelCase = bits * 2 - 1
return bits
def a_ ( lowerCamelCase : Optional[int] , lowerCamelCase : List[str]=BITS ):
lowerCAmelCase = x.device
lowerCAmelCase = (x > 0).int()
lowerCAmelCase = 2 ** torch.arange(bits - 1 , -1 , -1 , device=lowerCamelCase , dtype=torch.intaa )
lowerCAmelCase = rearrange(lowerCamelCase , 'd -> d 1 1' )
lowerCAmelCase = rearrange(lowerCamelCase , 'b (c d) h w -> b c d h w' , d=8 )
lowerCAmelCase = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' )
return (dec / 255).clamp(0.0 , 1.0 )
def a_ ( self : Optional[int] , lowerCamelCase : torch.FloatTensor , lowerCamelCase : int , lowerCamelCase : torch.FloatTensor , lowerCamelCase : float = 0.0 , lowerCamelCase : bool = True , lowerCamelCase : Any=None , lowerCamelCase : bool = True , ):
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
lowerCAmelCase = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
lowerCAmelCase = self.alphas_cumprod[timestep]
lowerCAmelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
lowerCAmelCase = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
lowerCAmelCase = self.bit_scale
if self.config.clip_sample:
lowerCAmelCase = torch.clamp(lowerCamelCase , -scale , lowerCamelCase )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
lowerCAmelCase = self._get_variance(lowerCamelCase , lowerCamelCase )
lowerCAmelCase = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
lowerCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCAmelCase = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
lowerCAmelCase = model_output.device if torch.is_tensor(lowerCamelCase ) else 'cpu'
lowerCAmelCase = torch.randn(model_output.shape , dtype=model_output.dtype , generator=lowerCamelCase ).to(lowerCamelCase )
lowerCAmelCase = self._get_variance(lowerCamelCase , lowerCamelCase ) ** 0.5 * eta * noise
lowerCAmelCase = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=lowerCamelCase , pred_original_sample=lowerCamelCase )
def a_ ( self : List[Any] , lowerCamelCase : torch.FloatTensor , lowerCamelCase : int , lowerCamelCase : torch.FloatTensor , lowerCamelCase : List[Any]="epsilon" , lowerCamelCase : Optional[Any]=None , lowerCamelCase : bool = True , ):
lowerCAmelCase = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
lowerCAmelCase , lowerCAmelCase = torch.split(lowerCamelCase , sample.shape[1] , dim=1 )
else:
lowerCAmelCase = None
# 1. compute alphas, betas
lowerCAmelCase = self.alphas_cumprod[t]
lowerCAmelCase = self.alphas_cumprod[t - 1] if t > 0 else self.one
lowerCAmelCase = 1 - alpha_prod_t
lowerCAmelCase = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
lowerCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
lowerCAmelCase = model_output
else:
raise ValueError(f'''Unsupported prediction_type {prediction_type}.''' )
# 3. Clip "predicted x_0"
lowerCAmelCase = self.bit_scale
if self.config.clip_sample:
lowerCAmelCase = torch.clamp(lowerCamelCase , -scale , lowerCamelCase )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
lowerCAmelCase = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
lowerCAmelCase = 0
if t > 0:
lowerCAmelCase = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=lowerCamelCase ).to(model_output.device )
lowerCAmelCase = (self._get_variance(lowerCamelCase , predicted_variance=lowerCamelCase ) ** 0.5) * noise
lowerCAmelCase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=lowerCamelCase , pred_original_sample=lowerCamelCase )
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : int , UpperCAmelCase__ : UNetaDConditionModel , UpperCAmelCase__ : Union[DDIMScheduler, DDPMScheduler] , UpperCAmelCase__ : Optional[float] = 1.0 , ) -> int:
super().__init__()
lowerCAmelCase = bit_scale
lowerCAmelCase = (
ddim_bit_scheduler_step if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
@torch.no_grad()
def __call__( self : Tuple , UpperCAmelCase__ : Optional[int] = 2_5_6 , UpperCAmelCase__ : Optional[int] = 2_5_6 , UpperCAmelCase__ : Optional[int] = 5_0 , UpperCAmelCase__ : Optional[torch.Generator] = None , UpperCAmelCase__ : Optional[int] = 1 , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , **UpperCAmelCase__ : str , ) -> Union[Tuple, ImagePipelineOutput]:
lowerCAmelCase = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=UpperCAmelCase__ , )
lowerCAmelCase = decimal_to_bits(UpperCAmelCase__ ) * self.bit_scale
lowerCAmelCase = latents.to(self.device )
self.scheduler.set_timesteps(UpperCAmelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
lowerCAmelCase = self.unet(UpperCAmelCase__ , UpperCAmelCase__ ).sample
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase = self.scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample
lowerCAmelCase = bits_to_decimal(UpperCAmelCase__ )
if output_type == "pil":
lowerCAmelCase = self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase__ )
| 4 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case =logging.get_logger(__name__)
def a_ ( lowerCamelCase : Any ):
lowerCAmelCase = OrderedDict()
for key, value in state_dict.items():
if key.startswith('module.encoder' ):
lowerCAmelCase = key.replace('module.encoder' , 'glpn.encoder' )
if key.startswith('module.decoder' ):
lowerCAmelCase = key.replace('module.decoder' , 'decoder.stages' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase = key[key.find('patch_embed' ) + len('patch_embed' )]
lowerCAmelCase = key.replace(f'''patch_embed{idx}''' , f'''patch_embeddings.{int(lowerCamelCase )-1}''' )
if "norm" in key:
lowerCAmelCase = key.replace('norm' , 'layer_norm' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
lowerCAmelCase = key.replace(f'''layer_norm{idx}''' , f'''layer_norm.{int(lowerCamelCase )-1}''' )
if "layer_norm1" in key:
lowerCAmelCase = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
lowerCAmelCase = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase = key[key.find('block' ) + len('block' )]
lowerCAmelCase = key.replace(f'''block{idx}''' , f'''block.{int(lowerCamelCase )-1}''' )
if "attn.q" in key:
lowerCAmelCase = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
lowerCAmelCase = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
lowerCAmelCase = key.replace('attn' , 'attention.self' )
if "fc1" in key:
lowerCAmelCase = key.replace('fc1' , 'dense1' )
if "fc2" in key:
lowerCAmelCase = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
lowerCAmelCase = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
lowerCAmelCase = key.replace('linear_fuse.conv' , 'linear_fuse' )
lowerCAmelCase = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase = key[key.find('linear_c' ) + len('linear_c' )]
lowerCAmelCase = key.replace(f'''linear_c{idx}''' , f'''linear_c.{int(lowerCamelCase )-1}''' )
if "bot_conv" in key:
lowerCAmelCase = key.replace('bot_conv' , '0.convolution' )
if "skip_conv1" in key:
lowerCAmelCase = key.replace('skip_conv1' , '1.convolution' )
if "skip_conv2" in key:
lowerCAmelCase = key.replace('skip_conv2' , '2.convolution' )
if "fusion1" in key:
lowerCAmelCase = key.replace('fusion1' , '1.fusion' )
if "fusion2" in key:
lowerCAmelCase = key.replace('fusion2' , '2.fusion' )
if "fusion3" in key:
lowerCAmelCase = key.replace('fusion3' , '3.fusion' )
if "fusion" in key and "conv" in key:
lowerCAmelCase = key.replace('conv' , 'convolutional_layer' )
if key.startswith('module.last_layer_depth' ):
lowerCAmelCase = key.replace('module.last_layer_depth' , 'head.head' )
lowerCAmelCase = value
return new_state_dict
def a_ ( lowerCamelCase : List[str] , lowerCamelCase : str ):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
lowerCAmelCase = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase = kv_bias[config.hidden_sizes[i] :]
def a_ ( ):
lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return image
@torch.no_grad()
def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any]=False , lowerCamelCase : List[str]=None ):
lowerCAmelCase = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
lowerCAmelCase = GLPNImageProcessor()
# prepare image
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=lowerCamelCase , return_tensors='pt' ).pixel_values
logger.info('Converting model...' )
# load original state dict
lowerCAmelCase = torch.load(lowerCamelCase , map_location=torch.device('cpu' ) )
# rename keys
lowerCAmelCase = rename_keys(lowerCamelCase )
# key and value matrices need special treatment
read_in_k_v(lowerCamelCase , lowerCamelCase )
# create HuggingFace model and load state dict
lowerCAmelCase = GLPNForDepthEstimation(lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
# forward pass
lowerCAmelCase = model(lowerCamelCase )
lowerCAmelCase = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
lowerCAmelCase = torch.tensor(
[[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
elif "kitti" in model_name:
lowerCAmelCase = torch.tensor(
[[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
else:
raise ValueError(f'''Unknown model name: {model_name}''' )
lowerCAmelCase = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , lowerCamelCase , atol=1e-4 )
print('Looks ok!' )
# finally, push to hub if required
if push_to_hub:
logger.info('Pushing model and image processor to the hub...' )
model.push_to_hub(
repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCamelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase , )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
__snake_case =parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 4 | 1 |
'''simple docstring'''
__snake_case =256
# Modulus to hash a string
__snake_case =1_000_003
def a_ ( lowerCamelCase : str , lowerCamelCase : str ):
lowerCAmelCase = len(lowerCamelCase )
lowerCAmelCase = len(lowerCamelCase )
if p_len > t_len:
return False
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 1
# Calculating the hash of pattern and substring of text
for i in range(lowerCamelCase ):
lowerCAmelCase = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
lowerCAmelCase = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
lowerCAmelCase = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
lowerCAmelCase = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def a_ ( ):
lowerCAmelCase = 'abc1abc12'
lowerCAmelCase = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
lowerCAmelCase = 'alskfjaldsk23adsfabcabc'
assert rabin_karp(lowerCamelCase , lowerCamelCase ) and not rabin_karp(lowerCamelCase , lowerCamelCase )
# Test 2)
lowerCAmelCase = 'ABABX'
lowerCAmelCase = 'ABABZABABYABABX'
assert rabin_karp(lowerCamelCase , lowerCamelCase )
# Test 3)
lowerCAmelCase = 'AAAB'
lowerCAmelCase = 'ABAAAAAB'
assert rabin_karp(lowerCamelCase , lowerCamelCase )
# Test 4)
lowerCAmelCase = 'abcdabcy'
lowerCAmelCase = 'abcxabcdabxabcdabcdabcy'
assert rabin_karp(lowerCamelCase , lowerCamelCase )
# Test 5)
lowerCAmelCase = 'Lü'
lowerCAmelCase = 'Lüsai'
assert rabin_karp(lowerCamelCase , lowerCamelCase )
lowerCAmelCase = 'Lue'
assert not rabin_karp(lowerCamelCase , lowerCamelCase )
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
| 4 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self : str ) -> List[str]:
lowerCAmelCase = XLMRobertaModel.from_pretrained('xlm-roberta-base' )
lowerCAmelCase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
lowerCAmelCase = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim
lowerCAmelCase = torch.tensor(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
lowerCAmelCase = model(UpperCAmelCase__ )['last_hidden_state'].detach()
self.assertEqual(output.shape , UpperCAmelCase__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCAmelCase__ , atol=1E-3 ) )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
lowerCAmelCase = XLMRobertaModel.from_pretrained('xlm-roberta-large' )
lowerCAmelCase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
lowerCAmelCase = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim
lowerCAmelCase = torch.tensor(
[[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
lowerCAmelCase = model(UpperCAmelCase__ )['last_hidden_state'].detach()
self.assertEqual(output.shape , UpperCAmelCase__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCAmelCase__ , atol=1E-3 ) )
| 4 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : Optional[int] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Tuple ) -> Any:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_euler' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self : List[str] ) -> Dict:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_euler' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_dpmpp_2m' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=1_5 , output_type='np' , use_karras_sigmas=UpperCAmelCase__ , )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array(
[0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 4 |
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def a_ ( lowerCamelCase : Dict ):
lowerCAmelCase = {}
lowerCAmelCase = tokenizer(example['content'] , truncation=lowerCamelCase )['input_ids']
lowerCAmelCase = len(example['content'] ) / len(output['input_ids'] )
return output
__snake_case =HfArgumentParser(PretokenizationArguments)
__snake_case =parser.parse_args()
if args.num_workers is None:
__snake_case =multiprocessing.cpu_count()
__snake_case =AutoTokenizer.from_pretrained(args.tokenizer_dir)
__snake_case =time.time()
__snake_case =load_dataset(args.dataset_name, split="""train""")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
__snake_case =time.time()
__snake_case =ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
__snake_case =time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 4 | 1 |
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
__snake_case =logging.get_logger(__name__)
def a_ ( lowerCamelCase : bool , lowerCamelCase : bool ):
def run_func(lowerCamelCase : List[str] ):
@wraps(lowerCamelCase )
def run_in_eager_mode(*lowerCamelCase : List[str] , **lowerCamelCase : Tuple ):
return func(*lowerCamelCase , **lowerCamelCase )
@wraps(lowerCamelCase )
@tf.function(experimental_compile=lowerCamelCase )
def run_in_graph_mode(*lowerCamelCase : List[Any] , **lowerCamelCase : Any ):
return func(*lowerCamelCase , **lowerCamelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
'Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.' )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def a_ ( lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int ):
lowerCAmelCase = random.Random()
lowerCAmelCase = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(lowerCamelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : TensorFlowBenchmarkArguments
lowerCamelCase : PretrainedConfig
lowerCamelCase : str = "TensorFlow"
@property
def __UpperCAmelCase ( self : int ) -> Optional[int]:
return tf.__version__
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> float:
# initialize GPU on separate process
lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
lowerCAmelCase = self._prepare_inference_func(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return self._measure_speed(_inference )
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> float:
lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
lowerCAmelCase = self._prepare_train_func(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return self._measure_speed(_train )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> [Memory, Optional[MemorySummary]]:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , UpperCAmelCase__ )
lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
lowerCAmelCase = self._prepare_inference_func(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return self._measure_memory(_inference )
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> [Memory, Optional[MemorySummary]]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , UpperCAmelCase__ )
lowerCAmelCase = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
lowerCAmelCase = self._prepare_train_func(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return self._measure_memory(_train )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Callable[[], None]:
lowerCAmelCase = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.' )
lowerCAmelCase = (
hasattr(UpperCAmelCase__ , 'architectures' )
and isinstance(config.architectures , UpperCAmelCase__ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
lowerCAmelCase = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
lowerCAmelCase = __import__('transformers' , fromlist=[model_class] )
lowerCAmelCase = getattr(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = model_cls(UpperCAmelCase__ )
except ImportError:
raise ImportError(
F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
lowerCAmelCase = TF_MODEL_MAPPING[config.__class__](UpperCAmelCase__ )
# encoder-decoder has vocab size saved differently
lowerCAmelCase = config.vocab_size if hasattr(UpperCAmelCase__ , 'vocab_size' ) else config.encoder.vocab_size
lowerCAmelCase = random_input_ids(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ , training=UpperCAmelCase__ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(UpperCAmelCase__ , training=UpperCAmelCase__ )
lowerCAmelCase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Callable[[], None]:
lowerCAmelCase = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.' )
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.' )
lowerCAmelCase = (
hasattr(UpperCAmelCase__ , 'architectures' )
and isinstance(config.architectures , UpperCAmelCase__ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
lowerCAmelCase = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
lowerCAmelCase = __import__('transformers' , fromlist=[model_class] )
lowerCAmelCase = getattr(UpperCAmelCase__ , UpperCAmelCase__ )
lowerCAmelCase = model_cls(UpperCAmelCase__ )
except ImportError:
raise ImportError(
F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
lowerCAmelCase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](UpperCAmelCase__ )
# encoder-decoder has vocab size saved differently
lowerCAmelCase = config.vocab_size if hasattr(UpperCAmelCase__ , 'vocab_size' ) else config.encoder.vocab_size
lowerCAmelCase = random_input_ids(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
lowerCAmelCase = model(UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , training=UpperCAmelCase__ )[0]
lowerCAmelCase = tf.gradients(UpperCAmelCase__ , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
lowerCAmelCase = model(UpperCAmelCase__ , labels=UpperCAmelCase__ , training=UpperCAmelCase__ )[0]
lowerCAmelCase = tf.gradients(UpperCAmelCase__ , model.trainable_variables )
return gradients
lowerCAmelCase = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : Optional[Any] ) -> float:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info('Do inference on TPU. Running model 5 times to stabilize compilation' )
timeit.repeat(UpperCAmelCase__ , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
lowerCAmelCase = timeit.repeat(
UpperCAmelCase__ , repeat=self.args.repeat , number=1_0 , )
return min(UpperCAmelCase__ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : Callable[[], None] ) -> [Memory, MemorySummary]:
logger.info(
'Note that TensorFlow allocates more memory than '
'it might need to speed up computation. '
'The memory reported here corresponds to the memory '
'reported by `nvidia-smi`, which can vary depending '
'on total available memory on the GPU that is used.' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'
' consumption line by line.' )
lowerCAmelCase = start_memory_tracing('transformers' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'
' with `args.memory=False`' )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'py3nvml not installed, we won\'t log GPU memory usage. '
'Install py3nvml (pip install py3nvml) to log information about GPU.' )
lowerCAmelCase = 'N/A'
else:
logger.info(
'Measuring total GPU usage on GPU device. Make sure to not have additional processes'
' running on the same GPU.' )
# init nvml
nvml.nvmlInit()
func()
lowerCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
lowerCAmelCase = nvml.nvmlDeviceGetMemoryInfo(UpperCAmelCase__ )
lowerCAmelCase = meminfo.used
lowerCAmelCase = Memory(UpperCAmelCase__ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'When enabling line by line tracing, the max peak memory for CPU is inaccurate in'
' TensorFlow.' )
lowerCAmelCase = None
else:
lowerCAmelCase = measure_peak_memory_cpu(UpperCAmelCase__ )
lowerCAmelCase = Memory(UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else memory_bytes
if self.args.trace_memory_line_by_line:
lowerCAmelCase = stop_memory_tracing(UpperCAmelCase__ )
if memory is None:
lowerCAmelCase = summary.total
else:
lowerCAmelCase = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
return "N/A", None
| 4 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__snake_case =logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : bool = field(default=__lowercase , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
lowerCamelCase : bool = field(
default=__lowercase , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
lowerCamelCase : Optional[int] = field(
default=__lowercase , metadata={
'''help''': (
'''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `max_length` value of the model configuration.'''
)
} , )
lowerCamelCase : Optional[int] = field(
default=__lowercase , metadata={
'''help''': (
'''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '''
'''to the `num_beams` value of the model configuration.'''
)
} , )
lowerCamelCase : Optional[Union[str, Path, GenerationConfig]] = field(
default=__lowercase , metadata={
'''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'''
} , )
def __UpperCAmelCase ( self : Dict ) -> List[str]:
lowerCAmelCase = super().to_dict()
for k, v in d.items():
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase = v.to_dict()
return d
| 4 | 1 |
'''simple docstring'''
__snake_case =[
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 4 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__snake_case =logging.get_logger("""transformers.models.encodec""")
__snake_case ={
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
__snake_case ={
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
__snake_case ={
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
__snake_case ={
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
__snake_case ={
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
__snake_case ={
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__snake_case ={
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__snake_case =[]
__snake_case =[]
def a_ ( lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : List[str] ):
for attribute in key.split('.' ):
lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
if weight_type is not None:
lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase ).shape
else:
lowerCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowerCAmelCase = value
elif weight_type == "weight_g":
lowerCAmelCase = value
elif weight_type == "weight_v":
lowerCAmelCase = value
elif weight_type == "bias":
lowerCAmelCase = value
elif weight_type == "running_mean":
lowerCAmelCase = value
elif weight_type == "running_var":
lowerCAmelCase = value
elif weight_type == "num_batches_tracked":
lowerCAmelCase = value
elif weight_type == "weight_ih_l0":
lowerCAmelCase = value
elif weight_type == "weight_hh_l0":
lowerCAmelCase = value
elif weight_type == "bias_ih_l0":
lowerCAmelCase = value
elif weight_type == "bias_hh_l0":
lowerCAmelCase = value
elif weight_type == "weight_ih_l1":
lowerCAmelCase = value
elif weight_type == "weight_hh_l1":
lowerCAmelCase = value
elif weight_type == "bias_ih_l1":
lowerCAmelCase = value
elif weight_type == "bias_hh_l1":
lowerCAmelCase = value
else:
lowerCAmelCase = value
logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def a_ ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] ):
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
lowerCAmelCase , lowerCAmelCase = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : Any , lowerCamelCase : str ):
lowerCAmelCase = []
if model_name == "encodec_24khz" or "encodec_32khz":
lowerCAmelCase = MAPPING_24K
elif model_name == "encodec_48khz":
lowerCAmelCase = MAPPING_48K
else:
raise ValueError(f'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(lowerCamelCase , lowerCamelCase ):
logger.info(f'''{name} was ignored''' )
continue
lowerCAmelCase = False
for key, mapped_key in MAPPING.items():
if "*" in key:
lowerCAmelCase , lowerCAmelCase = key.split('.*.' )
if prefix in name and suffix in name:
lowerCAmelCase = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('embed' ) and name.endswith('embed_avg' ):
continue
lowerCAmelCase = True
if "*" in mapped_key:
lowerCAmelCase = name.split(lowerCamelCase )[0].split('.' )[-2]
lowerCAmelCase = mapped_key.replace('*' , lowerCamelCase )
if "weight_g" in name:
lowerCAmelCase = 'weight_g'
elif "weight_v" in name:
lowerCAmelCase = 'weight_v'
elif "weight_ih_l0" in name:
lowerCAmelCase = 'weight_ih_l0'
elif "weight_hh_l0" in name:
lowerCAmelCase = 'weight_hh_l0'
elif "bias_ih_l0" in name:
lowerCAmelCase = 'bias_ih_l0'
elif "bias_hh_l0" in name:
lowerCAmelCase = 'bias_hh_l0'
elif "weight_ih_l1" in name:
lowerCAmelCase = 'weight_ih_l1'
elif "weight_hh_l1" in name:
lowerCAmelCase = 'weight_hh_l1'
elif "bias_ih_l1" in name:
lowerCAmelCase = 'bias_ih_l1'
elif "bias_hh_l1" in name:
lowerCAmelCase = 'bias_hh_l1'
elif "bias" in name:
lowerCAmelCase = 'bias'
elif "weight" in name:
lowerCAmelCase = 'weight'
elif "running_mean" in name:
lowerCAmelCase = 'running_mean'
elif "running_var" in name:
lowerCAmelCase = 'running_var'
elif "num_batches_tracked" in name:
lowerCAmelCase = 'num_batches_tracked'
else:
lowerCAmelCase = None
set_recursively(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
continue
if not is_used:
unused_weights.append(lowerCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def a_ ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : Dict=None , lowerCamelCase : Union[str, Any]=None , ):
if config_path is not None:
lowerCAmelCase = EncodecConfig.from_pretrained(lowerCamelCase )
else:
lowerCAmelCase = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
lowerCAmelCase = [8, 5, 4, 4]
lowerCAmelCase = [2.2]
lowerCAmelCase = 64
lowerCAmelCase = 32000
lowerCAmelCase = 2048
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
elif model_name == "encodec_48khz":
lowerCAmelCase = [8, 5, 4, 2]
lowerCAmelCase = [3.0, 6.0, 12.0, 24.0]
lowerCAmelCase = 48000
lowerCAmelCase = 2
lowerCAmelCase = False
lowerCAmelCase = 'time_group_norm'
lowerCAmelCase = True
lowerCAmelCase = 1.0
lowerCAmelCase = 0.01
else:
raise ValueError(f'''Unknown model name: {model_name}''' )
lowerCAmelCase = EncodecModel(lowerCamelCase )
lowerCAmelCase = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(lowerCamelCase )
lowerCAmelCase = torch.load(lowerCamelCase )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
lowerCAmelCase = original_checkpoint['best_state']
recursively_load_weights(lowerCamelCase , lowerCamelCase , lowerCamelCase )
model.save_pretrained(lowerCamelCase )
if repo_id:
print('Pushing to the hub...' )
feature_extractor.push_to_hub(lowerCamelCase )
model.push_to_hub(lowerCamelCase )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__snake_case =parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 4 | 1 |
'''simple docstring'''
class UpperCAmelCase_ :
def __init__( self : List[str] , UpperCAmelCase__ : list[int] ) -> None:
lowerCAmelCase = len(UpperCAmelCase__ )
lowerCAmelCase = [0] * len_array
if len_array > 0:
lowerCAmelCase = array[0]
for i in range(1 , UpperCAmelCase__ ):
lowerCAmelCase = self.prefix_sum[i - 1] + array[i]
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : int ) -> bool:
lowerCAmelCase = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(UpperCAmelCase__ )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__snake_case =logging.get_logger(__name__)
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : Dict , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : List[str] ) -> None:
warnings.warn(
'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use FlavaImageProcessor instead.' , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 4 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__snake_case =logging.getLogger(__name__)
def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : Dict ):
return (preds == labels).mean()
@dataclass
class UpperCAmelCase_ :
lowerCamelCase : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowerCamelCase : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
lowerCamelCase : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
lowerCamelCase : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class UpperCAmelCase_ :
lowerCamelCase : str = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
lowerCamelCase : str = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
lowerCamelCase : int = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
lowerCamelCase : bool = field(
default=__lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def a_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , lowerCamelCase )
# Set seed
set_seed(training_args.seed )
try:
lowerCAmelCase = processors[data_args.task_name]()
lowerCAmelCase = processor.get_labels()
lowerCAmelCase = len(lowerCamelCase )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
lowerCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowerCAmelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , )
# Get datasets
lowerCAmelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
lowerCAmelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(lowerCamelCase : EvalPrediction ) -> Dict:
lowerCAmelCase = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(lowerCamelCase , p.label_ids )}
# Data collator
lowerCAmelCase = DataCollatorWithPadding(lowerCamelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
lowerCAmelCase = Trainer(
model=lowerCamelCase , args=lowerCamelCase , train_dataset=lowerCamelCase , eval_dataset=lowerCamelCase , compute_metrics=lowerCamelCase , data_collator=lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowerCAmelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowerCAmelCase = trainer.evaluate()
lowerCAmelCase = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(lowerCamelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , lowerCamelCase , lowerCamelCase )
writer.write('%s = %s\n' % (key, value) )
results.update(lowerCamelCase )
return results
def a_ ( lowerCamelCase : Dict ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 4 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
__snake_case ={
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
__snake_case ={
"""facebook/blenderbot_small-90M""": 512,
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Tuple = VOCAB_FILES_NAMES
lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = BlenderbotSmallTokenizer
def __init__( self : Any , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : int="<|endoftext|>" , UpperCAmelCase__ : Dict="<|endoftext|>" , UpperCAmelCase__ : str="<|endoftext|>" , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Tuple=True , **UpperCAmelCase__ : Optional[Any] , ) -> Any:
super().__init__(
ByteLevelBPETokenizer(
vocab=UpperCAmelCase__ , merges=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ , ) , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , **UpperCAmelCase__ , )
lowerCAmelCase = add_prefix_space
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict=None ) -> Any:
lowerCAmelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 4 | 1 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : str = '''mvp'''
lowerCamelCase : str = ['''past_key_values''']
lowerCamelCase : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[str] , UpperCAmelCase__ : List[Any]=5_0_2_6_7 , UpperCAmelCase__ : Union[str, Any]=1_0_2_4 , UpperCAmelCase__ : Union[str, Any]=1_2 , UpperCAmelCase__ : Optional[Any]=4_0_9_6 , UpperCAmelCase__ : str=1_6 , UpperCAmelCase__ : List[str]=1_2 , UpperCAmelCase__ : int=4_0_9_6 , UpperCAmelCase__ : List[Any]=1_6 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Any="gelu" , UpperCAmelCase__ : Optional[Any]=1_0_2_4 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : Optional[int]=0.0 , UpperCAmelCase__ : str=0.02 , UpperCAmelCase__ : Optional[int]=0.0 , UpperCAmelCase__ : str=False , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : Optional[int]=0 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : List[str]=2 , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : Union[str, Any]=1_0_0 , UpperCAmelCase__ : Tuple=8_0_0 , **UpperCAmelCase__ : Optional[int] , ) -> List[str]:
lowerCAmelCase = vocab_size
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = d_model
lowerCAmelCase = encoder_ffn_dim
lowerCAmelCase = encoder_layers
lowerCAmelCase = encoder_attention_heads
lowerCAmelCase = decoder_ffn_dim
lowerCAmelCase = decoder_layers
lowerCAmelCase = decoder_attention_heads
lowerCAmelCase = dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = activation_dropout
lowerCAmelCase = activation_function
lowerCAmelCase = init_std
lowerCAmelCase = encoder_layerdrop
lowerCAmelCase = decoder_layerdrop
lowerCAmelCase = classifier_dropout
lowerCAmelCase = use_cache
lowerCAmelCase = encoder_layers
lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase = use_prompt
lowerCAmelCase = prompt_length
lowerCAmelCase = prompt_mid_dim
super().__init__(
pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , is_encoder_decoder=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , forced_eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , UpperCAmelCase__ ):
lowerCAmelCase = self.bos_token_id
warnings.warn(
F'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
'The config can simply be saved and uploaded again to be fixed.' )
| 4 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Union[str, Any] = '''speech_to_text_2'''
lowerCamelCase : Any = ['''past_key_values''']
lowerCamelCase : Optional[Any] = {'''num_attention_heads''': '''decoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Optional[int] , UpperCAmelCase__ : Optional[Any]=1_0_0_0_0 , UpperCAmelCase__ : int=6 , UpperCAmelCase__ : Optional[Any]=2_0_4_8 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : List[Any]=0.0 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str="relu" , UpperCAmelCase__ : Any=2_5_6 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : Any=0 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : int=1_0_2_4 , **UpperCAmelCase__ : Optional[Any] , ) -> Dict:
lowerCAmelCase = vocab_size
lowerCAmelCase = d_model
lowerCAmelCase = decoder_ffn_dim
lowerCAmelCase = decoder_layers
lowerCAmelCase = decoder_attention_heads
lowerCAmelCase = dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = activation_dropout
lowerCAmelCase = activation_function
lowerCAmelCase = init_std
lowerCAmelCase = decoder_layerdrop
lowerCAmelCase = use_cache
lowerCAmelCase = decoder_layers
lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase = max_target_positions
super().__init__(
pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
| 4 | 1 |
'''simple docstring'''
from math import factorial
def a_ ( lowerCamelCase : int = 100 ):
return sum(int(lowerCamelCase ) for x in str(factorial(lowerCamelCase ) ) )
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 4 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class UpperCAmelCase_ ( __lowercase ):
def __lt__( self : Optional[int] , UpperCAmelCase__ : List[str] ) -> List[Any]:
return self[-1] < other[-1]
def __eq__( self : str , UpperCAmelCase__ : List[str] ) -> Tuple:
return self[-1] == other[-1]
def a_ ( lowerCamelCase : list ):
lowerCAmelCase = []
# sort into stacks
for element in collection:
lowerCAmelCase = Stack([element] )
lowerCAmelCase = bisect_left(lowerCamelCase , lowerCamelCase )
if i != len(lowerCamelCase ):
stacks[i].append(lowerCamelCase )
else:
stacks.append(lowerCamelCase )
# use a heap-based merge to merge stack efficiently
lowerCAmelCase = merge(*(reversed(lowerCamelCase ) for stack in stacks) )
return collection
if __name__ == "__main__":
__snake_case =input("""Enter numbers separated by a comma:\n""").strip()
__snake_case =[int(item) for item in user_input.split(""",""")]
print(patience_sort(unsorted))
| 4 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__snake_case ={"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case =[
"""VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VanForImageClassification""",
"""VanModel""",
"""VanPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
__snake_case =_LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 4 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__snake_case ="""\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__snake_case ="""\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__snake_case ="""
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
def __UpperCAmelCase ( self : Tuple ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : Optional[int]=False ) -> int:
lowerCAmelCase = compute_bleu(
reference_corpus=UpperCAmelCase__ , translation_corpus=UpperCAmelCase__ , max_order=UpperCAmelCase__ , smooth=UpperCAmelCase__ )
((lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase)) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 4 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case ={
"""configuration_blip_2""": [
"""BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Blip2Config""",
"""Blip2QFormerConfig""",
"""Blip2VisionConfig""",
],
"""processing_blip_2""": ["""Blip2Processor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case =[
"""BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Blip2Model""",
"""Blip2QFormerModel""",
"""Blip2PreTrainedModel""",
"""Blip2ForConditionalGeneration""",
"""Blip2VisionModel""",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
__snake_case =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 4 |
'''simple docstring'''
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__snake_case ="""\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
__snake_case ="""\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
__snake_case ="""
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def a_ ( lowerCamelCase : str , lowerCamelCase : Union[str, Any] ):
return float((preds == labels).mean() )
def a_ ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict , lowerCamelCase : str="binary" ):
lowerCAmelCase = simple_accuracy(lowerCamelCase , lowerCamelCase )
lowerCAmelCase = float(fa_score(y_true=lowerCamelCase , y_pred=lowerCamelCase , average=lowerCamelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : List[Any] ):
lowerCAmelCase = {}
for id_pred, label in zip(lowerCamelCase , lowerCamelCase ):
lowerCAmelCase = f'''{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}'''
lowerCAmelCase = id_pred['prediction']
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
lowerCAmelCase = [(pred, label)]
lowerCAmelCase , lowerCAmelCase = [], []
for question, preds_labels in question_map.items():
lowerCAmelCase , lowerCAmelCase = zip(*lowerCamelCase )
lowerCAmelCase = fa_score(y_true=lowerCamelCase , y_pred=lowerCamelCase , average='macro' )
fas.append(lowerCamelCase )
lowerCAmelCase = int(sum(pred == label for pred, label in preds_labels ) == len(lowerCamelCase ) )
ems.append(lowerCamelCase )
lowerCAmelCase = float(sum(lowerCamelCase ) / len(lowerCamelCase ) )
lowerCAmelCase = sum(lowerCamelCase ) / len(lowerCamelCase )
lowerCAmelCase = float(fa_score(y_true=lowerCamelCase , y_pred=[id_pred['prediction'] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] ) -> Any:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(UpperCAmelCase__ , UpperCAmelCase__ )}
elif self.config_name == "cb":
return acc_and_fa(UpperCAmelCase__ , UpperCAmelCase__ , fa_avg='macro' )
elif self.config_name == "record":
lowerCAmelCase = [
{
'qas': [
{'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
for ref in references
]
}
]
lowerCAmelCase = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
return evaluate_record(UpperCAmelCase__ , UpperCAmelCase__ )[0]
elif self.config_name == "multirc":
return evaluate_multirc(UpperCAmelCase__ , UpperCAmelCase__ )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(UpperCAmelCase__ , UpperCAmelCase__ )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
| 4 | 1 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
__snake_case ={
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
__snake_case ={"""facebook/blenderbot-3B""": 128}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : List[Any] = VOCAB_FILES_NAMES
lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = ['''input_ids''', '''attention_mask''']
lowerCamelCase : List[Any] = BlenderbotTokenizer
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : str="replace" , UpperCAmelCase__ : Dict="<s>" , UpperCAmelCase__ : Tuple="</s>" , UpperCAmelCase__ : Optional[Any]="</s>" , UpperCAmelCase__ : Any="<s>" , UpperCAmelCase__ : List[str]="<unk>" , UpperCAmelCase__ : int="<pad>" , UpperCAmelCase__ : Union[str, Any]="<mask>" , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Union[str, Any]=True , **UpperCAmelCase__ : Optional[int] , ) -> int:
super().__init__(
UpperCAmelCase__ , UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , errors=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ , **UpperCAmelCase__ , )
lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , UpperCAmelCase__ ) != add_prefix_space:
lowerCAmelCase = getattr(UpperCAmelCase__ , pre_tok_state.pop('type' ) )
lowerCAmelCase = add_prefix_space
lowerCAmelCase = pre_tok_class(**UpperCAmelCase__ )
lowerCAmelCase = add_prefix_space
lowerCAmelCase = 'post_processor'
lowerCAmelCase = getattr(self.backend_tokenizer , UpperCAmelCase__ , UpperCAmelCase__ )
if tokenizer_component_instance:
lowerCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase = tuple(state['sep'] )
if "cls" in state:
lowerCAmelCase = tuple(state['cls'] )
lowerCAmelCase = False
if state.get('add_prefix_space' , UpperCAmelCase__ ) != add_prefix_space:
lowerCAmelCase = add_prefix_space
lowerCAmelCase = True
if state.get('trim_offsets' , UpperCAmelCase__ ) != trim_offsets:
lowerCAmelCase = trim_offsets
lowerCAmelCase = True
if changes_to_apply:
lowerCAmelCase = getattr(UpperCAmelCase__ , state.pop('type' ) )
lowerCAmelCase = component_class(**UpperCAmelCase__ )
setattr(self.backend_tokenizer , UpperCAmelCase__ , UpperCAmelCase__ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : Optional[Any] ) -> Tuple:
lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else value
lowerCAmelCase = value
def __UpperCAmelCase ( self : Optional[Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : List[str] ) -> BatchEncoding:
lowerCAmelCase = kwargs.get('is_split_into_words' , UpperCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[str] , *UpperCAmelCase__ : str , **UpperCAmelCase__ : List[str] ) -> BatchEncoding:
lowerCAmelCase = kwargs.get('is_split_into_words' , UpperCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
lowerCAmelCase = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> Any:
return token_ids_a + [self.eos_token_id]
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : "Conversation" ) -> List[int]:
lowerCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(UpperCAmelCase__ )
lowerCAmelCase = ' '.join(UpperCAmelCase__ )
lowerCAmelCase = self.encode(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > self.model_max_length:
lowerCAmelCase = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 4 |
'''simple docstring'''
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
| 4 | 1 |
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def a_ ( lowerCamelCase : Dict ):
lowerCAmelCase = {}
lowerCAmelCase = job['started_at']
lowerCAmelCase = job['completed_at']
lowerCAmelCase = date_parser.parse(lowerCamelCase )
lowerCAmelCase = date_parser.parse(lowerCamelCase )
lowerCAmelCase = round((end_datetime - start_datetime).total_seconds() / 60.0 )
lowerCAmelCase = start
lowerCAmelCase = end
lowerCAmelCase = duration_in_min
return job_info
def a_ ( lowerCamelCase : Optional[int] , lowerCamelCase : Tuple=None ):
lowerCAmelCase = None
if token is not None:
lowerCAmelCase = {'Accept': 'application/vnd.github+json', 'Authorization': f'''Bearer {token}'''}
lowerCAmelCase = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
lowerCAmelCase = requests.get(lowerCamelCase , headers=lowerCamelCase ).json()
lowerCAmelCase = {}
try:
job_time.update({job['name']: extract_time_from_single_job(lowerCamelCase ) for job in result['jobs']} )
lowerCAmelCase = math.ceil((result['total_count'] - 100) / 100 )
for i in range(lowerCamelCase ):
lowerCAmelCase = requests.get(url + f'''&page={i + 2}''' , headers=lowerCamelCase ).json()
job_time.update({job['name']: extract_time_from_single_job(lowerCamelCase ) for job in result['jobs']} )
return job_time
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
__snake_case =parser.parse_args()
__snake_case =get_job_time(args.workflow_run_id)
__snake_case =dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F'''{k}: {v["duration"]}''')
| 4 |
'''simple docstring'''
import os
__snake_case ={"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1_000}
def a_ ( lowerCamelCase : str ):
lowerCAmelCase = 0
lowerCAmelCase = 0
while index < len(lowerCamelCase ) - 1:
lowerCAmelCase = SYMBOLS[numerals[index]]
lowerCAmelCase = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def a_ ( lowerCamelCase : int ):
lowerCAmelCase = ''
lowerCAmelCase = num // 1000
numerals += m_count * "M"
num %= 1000
lowerCAmelCase = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
lowerCAmelCase = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def a_ ( lowerCamelCase : str = "/p089_roman.txt" ):
lowerCAmelCase = 0
with open(os.path.dirname(lowerCamelCase ) + roman_numerals_filename ) as filea:
lowerCAmelCase = filea.readlines()
for line in lines:
lowerCAmelCase = line.strip()
lowerCAmelCase = parse_roman_numerals(lowerCamelCase )
lowerCAmelCase = generate_roman_numerals(lowerCamelCase )
savings += len(lowerCamelCase ) - len(lowerCamelCase )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 4 | 1 |
'''simple docstring'''
class UpperCAmelCase_ :
def __init__( self : List[Any] ) -> Any:
lowerCAmelCase = {}
def __UpperCAmelCase ( self : Any ) -> None:
print(self.vertex )
for i in self.vertex:
print(UpperCAmelCase__ , ' -> ' , ' -> '.join([str(UpperCAmelCase__ ) for j in self.vertex[i]] ) )
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> None:
# check if vertex is already present,
if from_vertex in self.vertex:
self.vertex[from_vertex].append(UpperCAmelCase__ )
else:
# else make a new vertex
lowerCAmelCase = [to_vertex]
def __UpperCAmelCase ( self : List[Any] ) -> None:
# visited array for storing already visited nodes
lowerCAmelCase = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(UpperCAmelCase__ , UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : list ) -> None:
# mark start vertex as visited
lowerCAmelCase = True
print(UpperCAmelCase__ , end=' ' )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(UpperCAmelCase__ , UpperCAmelCase__ )
if __name__ == "__main__":
__snake_case =Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 4 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__snake_case =random.Random()
if is_torch_available():
import torch
def a_ ( lowerCamelCase : Dict , lowerCamelCase : Dict=1.0 , lowerCamelCase : List[Any]=None , lowerCamelCase : Union[str, Any]=None ):
if rng is None:
lowerCAmelCase = global_rng
lowerCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase_ ( unittest.TestCase ):
def __init__( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str]=7 , UpperCAmelCase__ : int=4_0_0 , UpperCAmelCase__ : int=2_0_0_0 , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple=1_6_0_0_0 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Union[str, Any]=True , ) -> Any:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = min_seq_length
lowerCAmelCase = max_seq_length
lowerCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase = feature_size
lowerCAmelCase = padding_value
lowerCAmelCase = sampling_rate
lowerCAmelCase = return_attention_mask
lowerCAmelCase = do_normalize
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Union[str, Any]=False ) -> Optional[Any]:
def _flatten(UpperCAmelCase__ : int ):
return list(itertools.chain(*UpperCAmelCase__ ) )
if equal_length:
lowerCAmelCase = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCAmelCase = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase = [np.asarray(UpperCAmelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
lowerCamelCase : Dict = ASTFeatureExtractor
def __UpperCAmelCase ( self : str ) -> Optional[int]:
lowerCAmelCase = ASTFeatureExtractionTester(self )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase = [np.asarray(UpperCAmelCase__ ) for speech_input in speech_inputs]
# Test not batched input
lowerCAmelCase = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test batched
lowerCAmelCase = feat_extract(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
lowerCAmelCase = np.asarray(UpperCAmelCase__ )
lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
import torch
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = np.random.rand(1_0_0 ).astype(np.floataa )
lowerCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : str ) -> Tuple:
from datasets import load_dataset
lowerCAmelCase = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
lowerCAmelCase = ds.sort('id' ).select(range(UpperCAmelCase__ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
@require_torch
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
# fmt: off
lowerCAmelCase = torch.tensor(
[-0.9_894, -1.2_776, -0.9_066, -1.2_776, -0.9_349, -1.2_609, -1.0_386, -1.2_776,
-1.1_561, -1.2_776, -1.2_052, -1.2_723, -1.2_190, -1.2_132, -1.2_776, -1.1_133,
-1.1_953, -1.1_343, -1.1_584, -1.2_203, -1.1_770, -1.2_474, -1.2_381, -1.1_936,
-0.9_270, -0.8_317, -0.8_049, -0.7_706, -0.7_565, -0.7_869] )
# fmt: on
lowerCAmelCase = self._load_datasamples(1 )
lowerCAmelCase = ASTFeatureExtractor()
lowerCAmelCase = feature_extractor(UpperCAmelCase__ , return_tensors='pt' ).input_values
self.assertEquals(input_values.shape , (1, 1_0_2_4, 1_2_8) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , UpperCAmelCase__ , atol=1E-4 ) )
| 4 | 1 |
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def a_ ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int] , lowerCamelCase : List[Any] , lowerCamelCase : Dict ):
lowerCAmelCase = sorted(zip(lowerCamelCase , lowerCamelCase ) , key=lambda lowerCamelCase : x[0] / x[1] , reverse=lowerCamelCase )
lowerCAmelCase , lowerCAmelCase = [i[0] for i in r], [i[1] for i in r]
lowerCAmelCase = list(accumulate(lowerCamelCase ) )
lowerCAmelCase = bisect(lowerCamelCase , lowerCamelCase )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : str ) -> List[Any]:
lowerCAmelCase = torch.nn.Linear(1_0 , 1_0 )
lowerCAmelCase = torch.optim.SGD(model.parameters() , 0.1 )
lowerCAmelCase = Accelerator()
lowerCAmelCase = accelerator.prepare(UpperCAmelCase__ )
try:
pickle.loads(pickle.dumps(UpperCAmelCase__ ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 4 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__snake_case =None
__snake_case =logging.get_logger(__name__)
__snake_case ={"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
__snake_case ={
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
__snake_case ={
"""facebook/mbart-large-en-ro""": 1_024,
"""facebook/mbart-large-cc25""": 1_024,
}
# fmt: off
__snake_case =["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Dict = VOCAB_FILES_NAMES
lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = ['''input_ids''', '''attention_mask''']
lowerCamelCase : List[Any] = MBartTokenizer
lowerCamelCase : List[int] = []
lowerCamelCase : List[int] = []
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Tuple="<s>" , UpperCAmelCase__ : Tuple="</s>" , UpperCAmelCase__ : Any="</s>" , UpperCAmelCase__ : str="<s>" , UpperCAmelCase__ : str="<unk>" , UpperCAmelCase__ : Dict="<pad>" , UpperCAmelCase__ : Optional[Any]="<mask>" , UpperCAmelCase__ : int=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : List[Any]=None , **UpperCAmelCase__ : str , ) -> List[str]:
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token
super().__init__(
vocab_file=UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , src_lang=UpperCAmelCase__ , tgt_lang=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ , )
lowerCAmelCase = vocab_file
lowerCAmelCase = False if not self.vocab_file else True
lowerCAmelCase = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowerCAmelCase = {
lang_code: self.convert_tokens_to_ids(UpperCAmelCase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCAmelCase = src_lang if src_lang is not None else 'en_XX'
lowerCAmelCase = self.convert_tokens_to_ids(self._src_lang )
lowerCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCAmelCase ( self : int ) -> str:
return self._src_lang
@src_lang.setter
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : str ) -> None:
lowerCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCAmelCase ( self : Tuple , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] , UpperCAmelCase__ : Optional[str] , **UpperCAmelCase__ : Tuple ) -> Tuple:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowerCAmelCase = src_lang
lowerCAmelCase = self(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = self.convert_tokens_to_ids(UpperCAmelCase__ )
lowerCAmelCase = tgt_lang_id
return inputs
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str = "en_XX" , UpperCAmelCase__ : Optional[List[str]] = None , UpperCAmelCase__ : str = "ro_RO" , **UpperCAmelCase__ : Dict , ) -> BatchEncoding:
lowerCAmelCase = src_lang
lowerCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCAmelCase ( self : Tuple , UpperCAmelCase__ : int ) -> None:
lowerCAmelCase = self.convert_tokens_to_ids(UpperCAmelCase__ )
lowerCAmelCase = []
lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
lowerCAmelCase = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCAmelCase = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCAmelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : str ) -> None:
lowerCAmelCase = self.convert_tokens_to_ids(UpperCAmelCase__ )
lowerCAmelCase = []
lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
lowerCAmelCase = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCAmelCase = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCAmelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' )
return
lowerCAmelCase = os.path.join(
UpperCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ):
copyfile(self.vocab_file , UpperCAmelCase__ )
return (out_vocab_file,)
| 4 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
__snake_case ={
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
__snake_case ={"""facebook/blenderbot-3B""": 128}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : List[Any] = VOCAB_FILES_NAMES
lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = ['''input_ids''', '''attention_mask''']
lowerCamelCase : List[Any] = BlenderbotTokenizer
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : str="replace" , UpperCAmelCase__ : Dict="<s>" , UpperCAmelCase__ : Tuple="</s>" , UpperCAmelCase__ : Optional[Any]="</s>" , UpperCAmelCase__ : Any="<s>" , UpperCAmelCase__ : List[str]="<unk>" , UpperCAmelCase__ : int="<pad>" , UpperCAmelCase__ : Union[str, Any]="<mask>" , UpperCAmelCase__ : str=False , UpperCAmelCase__ : Union[str, Any]=True , **UpperCAmelCase__ : Optional[int] , ) -> int:
super().__init__(
UpperCAmelCase__ , UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , errors=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ , **UpperCAmelCase__ , )
lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , UpperCAmelCase__ ) != add_prefix_space:
lowerCAmelCase = getattr(UpperCAmelCase__ , pre_tok_state.pop('type' ) )
lowerCAmelCase = add_prefix_space
lowerCAmelCase = pre_tok_class(**UpperCAmelCase__ )
lowerCAmelCase = add_prefix_space
lowerCAmelCase = 'post_processor'
lowerCAmelCase = getattr(self.backend_tokenizer , UpperCAmelCase__ , UpperCAmelCase__ )
if tokenizer_component_instance:
lowerCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase = tuple(state['sep'] )
if "cls" in state:
lowerCAmelCase = tuple(state['cls'] )
lowerCAmelCase = False
if state.get('add_prefix_space' , UpperCAmelCase__ ) != add_prefix_space:
lowerCAmelCase = add_prefix_space
lowerCAmelCase = True
if state.get('trim_offsets' , UpperCAmelCase__ ) != trim_offsets:
lowerCAmelCase = trim_offsets
lowerCAmelCase = True
if changes_to_apply:
lowerCAmelCase = getattr(UpperCAmelCase__ , state.pop('type' ) )
lowerCAmelCase = component_class(**UpperCAmelCase__ )
setattr(self.backend_tokenizer , UpperCAmelCase__ , UpperCAmelCase__ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : Optional[Any] ) -> Tuple:
lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else value
lowerCAmelCase = value
def __UpperCAmelCase ( self : Optional[Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : List[str] ) -> BatchEncoding:
lowerCAmelCase = kwargs.get('is_split_into_words' , UpperCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[str] , *UpperCAmelCase__ : str , **UpperCAmelCase__ : List[str] ) -> BatchEncoding:
lowerCAmelCase = kwargs.get('is_split_into_words' , UpperCAmelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
lowerCAmelCase = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> Any:
return token_ids_a + [self.eos_token_id]
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : "Conversation" ) -> List[int]:
lowerCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(UpperCAmelCase__ )
lowerCAmelCase = ' '.join(UpperCAmelCase__ )
lowerCAmelCase = self.encode(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > self.model_max_length:
lowerCAmelCase = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 4 | 1 |
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
__snake_case =logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : List[Any] , *UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : str=None , **UpperCAmelCase__ : Tuple ) -> Tuple:
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = eval_examples
lowerCAmelCase = post_process_function
lowerCAmelCase = quant_trainer_args
lowerCAmelCase = 1_2_8 # default number of calibration samples
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : List[str]=None ) -> Union[str, Any]:
if calib_dataset is None and self.calib_dataset is None:
raise ValueError('Trainer: calibration requires an calib_dataset.' )
lowerCAmelCase = calib_dataset if calib_dataset is not None else self.calib_dataset
lowerCAmelCase = self._remove_unused_columns(UpperCAmelCase__ , description='Calibration' )
return DataLoader(
UpperCAmelCase__ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=UpperCAmelCase__ , )
def __UpperCAmelCase ( self : Tuple , UpperCAmelCase__ : int=None ) -> Tuple:
lowerCAmelCase = self.train_dataset if calib_dataset is None else calib_dataset
lowerCAmelCase = self.get_calib_dataloader(UpperCAmelCase__ )
lowerCAmelCase = self.model
quant_trainer.configure_model(UpperCAmelCase__ , self.quant_trainer_args , calib=UpperCAmelCase__ )
model.eval()
quant_trainer.enable_calibration(UpperCAmelCase__ )
logger.info('***** Running calibration *****' )
logger.info(F''' Num examples = {self.calib_num}''' )
logger.info(F''' Batch size = {calib_dataloader.batch_size}''' )
for step, inputs in enumerate(UpperCAmelCase__ ):
# Prediction step
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.prediction_step(UpperCAmelCase__ , UpperCAmelCase__ , prediction_loss_only=UpperCAmelCase__ )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(UpperCAmelCase__ , self.quant_trainer_args )
lowerCAmelCase = model
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : str = "eval" ) -> int:
lowerCAmelCase = self.eval_dataset if eval_dataset is None else eval_dataset
lowerCAmelCase = self.get_eval_dataloader(UpperCAmelCase__ )
lowerCAmelCase = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowerCAmelCase = self.compute_metrics
lowerCAmelCase = None
lowerCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCAmelCase = eval_loop(
UpperCAmelCase__ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase__ , )
finally:
lowerCAmelCase = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
lowerCAmelCase = self.post_process_function(UpperCAmelCase__ , UpperCAmelCase__ , output.predictions )
lowerCAmelCase = self.compute_metrics(UpperCAmelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
lowerCAmelCase = metrics.pop(UpperCAmelCase__ )
self.log(UpperCAmelCase__ )
else:
lowerCAmelCase = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowerCAmelCase = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCAmelCase__ )
return metrics
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : str = "test" ) -> int:
lowerCAmelCase = self.get_test_dataloader(UpperCAmelCase__ )
# Temporarily disable metric computation, we will do it in the loop here.
lowerCAmelCase = self.compute_metrics
lowerCAmelCase = None
lowerCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCAmelCase = eval_loop(
UpperCAmelCase__ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase__ , )
finally:
lowerCAmelCase = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
lowerCAmelCase = self.post_process_function(UpperCAmelCase__ , UpperCAmelCase__ , output.predictions , 'predict' )
lowerCAmelCase = self.compute_metrics(UpperCAmelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
lowerCAmelCase = metrics.pop(UpperCAmelCase__ )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCAmelCase__ )
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any]="./" ) -> Union[str, Any]:
lowerCAmelCase = self.eval_dataset
lowerCAmelCase = self.get_eval_dataloader(UpperCAmelCase__ )
lowerCAmelCase = next(iter(UpperCAmelCase__ ) )
# saving device - to make it consistent
lowerCAmelCase = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
# convert to tuple
lowerCAmelCase = tuple(v.to(UpperCAmelCase__ ) for k, v in batch.items() )
logger.info('Converting model to be onnx compatible' )
from pytorch_quantization.nn import TensorQuantizer
lowerCAmelCase = True
lowerCAmelCase = self.model.to(UpperCAmelCase__ )
model.eval()
model.float()
lowerCAmelCase = model.module if hasattr(UpperCAmelCase__ , 'module' ) else model
quant_trainer.configure_model(UpperCAmelCase__ , self.quant_trainer_args )
lowerCAmelCase = os.path.join(UpperCAmelCase__ , 'model.onnx' )
logger.info(F'''exporting model to {output_model_file}''' )
lowerCAmelCase = {0: 'batch_size', 1: 'seq_len'}
torch.onnx.export(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , export_params=UpperCAmelCase__ , opset_version=1_3 , do_constant_folding=UpperCAmelCase__ , input_names=['input_ids', 'attention_mask', 'token_type_ids'] , output_names=['output_start_logits', 'output_end_logits'] , dynamic_axes={
'input_ids': axes,
'attention_mask': axes,
'token_type_ids': axes,
'output_start_logits': axes,
'output_end_logits': axes,
} , verbose=UpperCAmelCase__ , )
logger.info('onnx export finished' )
| 4 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def a_ ( lowerCamelCase : list[int] , lowerCamelCase : list[int] , lowerCamelCase : int ):
lowerCAmelCase = [0] * no_of_processes
lowerCAmelCase = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(lowerCamelCase ):
lowerCAmelCase = burst_time[i]
lowerCAmelCase = []
lowerCAmelCase = 0
lowerCAmelCase = 0
# When processes are not completed,
# A process whose arrival time has passed \
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
lowerCAmelCase = []
lowerCAmelCase = -1
for i in range(lowerCamelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(lowerCamelCase )
if len(lowerCamelCase ) > 0:
lowerCAmelCase = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
lowerCAmelCase = i
total_time += burst_time[target_process]
completed += 1
lowerCAmelCase = 0
lowerCAmelCase = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def a_ ( lowerCamelCase : list[int] , lowerCamelCase : int , lowerCamelCase : list[int] ):
lowerCAmelCase = [0] * no_of_processes
for i in range(lowerCamelCase ):
lowerCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
__snake_case =4
__snake_case =[2, 5, 3, 7]
__snake_case =[0, 0, 0, 0]
__snake_case =calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__snake_case =calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
F'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(F'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(F'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 4 | 1 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__snake_case =logging.getLogger(__name__)
class UpperCAmelCase_ ( __lowercase ):
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Optional[Any]=None ) -> str:
lowerCAmelCase = self.layer[current_layer](UpperCAmelCase__ , UpperCAmelCase__ , head_mask[current_layer] )
lowerCAmelCase = layer_outputs[0]
return hidden_states
@add_start_docstrings(
'''The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.''' , __lowercase , )
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : int , UpperCAmelCase__ : Tuple ) -> Dict:
super().__init__(UpperCAmelCase__ )
lowerCAmelCase = BertEncoderWithPabee(UpperCAmelCase__ )
self.init_weights()
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 0
def __UpperCAmelCase ( self : Tuple , UpperCAmelCase__ : Union[str, Any] ) -> Union[str, Any]:
lowerCAmelCase = threshold
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : Dict ) -> Dict:
lowerCAmelCase = patience
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
lowerCAmelCase = 0
lowerCAmelCase = 0
def __UpperCAmelCase ( self : int ) -> str:
lowerCAmelCase = self.inference_layers_num / self.inference_instances_num
lowerCAmelCase = (
F'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
F''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(UpperCAmelCase__ )
@add_start_docstrings_to_model_forward(UpperCAmelCase__ )
def __UpperCAmelCase ( self : Tuple , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Dict=False , ) -> List[Any]:
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
lowerCAmelCase = input_ids.size()
elif inputs_embeds is not None:
lowerCAmelCase = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
lowerCAmelCase = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowerCAmelCase = torch.ones(UpperCAmelCase__ , device=UpperCAmelCase__ )
if token_type_ids is None:
lowerCAmelCase = torch.zeros(UpperCAmelCase__ , dtype=torch.long , device=UpperCAmelCase__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowerCAmelCase = self.get_extended_attention_mask(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = encoder_hidden_states.size()
lowerCAmelCase = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
lowerCAmelCase = torch.ones(UpperCAmelCase__ , device=UpperCAmelCase__ )
lowerCAmelCase = self.invert_attention_mask(UpperCAmelCase__ )
else:
lowerCAmelCase = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowerCAmelCase = self.get_head_mask(UpperCAmelCase__ , self.config.num_hidden_layers )
lowerCAmelCase = self.embeddings(
input_ids=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , inputs_embeds=UpperCAmelCase__ )
lowerCAmelCase = embedding_output
if self.training:
lowerCAmelCase = []
for i in range(self.config.num_hidden_layers ):
lowerCAmelCase = self.encoder.adaptive_forward(
UpperCAmelCase__ , current_layer=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , head_mask=UpperCAmelCase__ )
lowerCAmelCase = self.pooler(UpperCAmelCase__ )
lowerCAmelCase = output_layers[i](output_dropout(UpperCAmelCase__ ) )
res.append(UpperCAmelCase__ )
elif self.patience == 0: # Use all layers for inference
lowerCAmelCase = self.encoder(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
lowerCAmelCase = self.pooler(encoder_outputs[0] )
lowerCAmelCase = [output_layers[self.config.num_hidden_layers - 1](UpperCAmelCase__ )]
else:
lowerCAmelCase = 0
lowerCAmelCase = None
lowerCAmelCase = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
lowerCAmelCase = self.encoder.adaptive_forward(
UpperCAmelCase__ , current_layer=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , head_mask=UpperCAmelCase__ )
lowerCAmelCase = self.pooler(UpperCAmelCase__ )
lowerCAmelCase = output_layers[i](UpperCAmelCase__ )
if regression:
lowerCAmelCase = logits.detach()
if patient_result is not None:
lowerCAmelCase = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
lowerCAmelCase = 0
else:
lowerCAmelCase = logits.detach().argmax(dim=1 )
if patient_result is not None:
lowerCAmelCase = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(UpperCAmelCase__ ) ):
patient_counter += 1
else:
lowerCAmelCase = 0
lowerCAmelCase = logits
if patient_counter == self.patience:
break
lowerCAmelCase = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
'''Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. ''' , __lowercase , )
class UpperCAmelCase_ ( __lowercase ):
def __init__( self : List[Any] , UpperCAmelCase__ : List[Any] ) -> List[str]:
super().__init__(UpperCAmelCase__ )
lowerCAmelCase = config.num_labels
lowerCAmelCase = BertModelWithPabee(UpperCAmelCase__ )
lowerCAmelCase = nn.Dropout(config.hidden_dropout_prob )
lowerCAmelCase = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Any=None , ) -> Any:
lowerCAmelCase = self.bert(
input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , position_ids=UpperCAmelCase__ , head_mask=UpperCAmelCase__ , inputs_embeds=UpperCAmelCase__ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
lowerCAmelCase = (logits[-1],)
if labels is not None:
lowerCAmelCase = None
lowerCAmelCase = 0
for ix, logits_item in enumerate(UpperCAmelCase__ ):
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase = MSELoss()
lowerCAmelCase = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
lowerCAmelCase = CrossEntropyLoss()
lowerCAmelCase = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
lowerCAmelCase = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
lowerCAmelCase = (total_loss / total_weights,) + outputs
return outputs
| 4 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : Optional[int] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Tuple ) -> Any:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_euler' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self : List[str] ) -> Dict:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_euler' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe([prompt] , generator=UpperCAmelCase__ , guidance_scale=9.0 , num_inference_steps=2_0 , output_type='np' )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
lowerCAmelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
sd_pipe.set_scheduler('sample_dpmpp_2m' )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
lowerCAmelCase = torch.manual_seed(0 )
lowerCAmelCase = sd_pipe(
[prompt] , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=1_5 , output_type='np' , use_karras_sigmas=UpperCAmelCase__ , )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase = np.array(
[0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 4 | 1 |
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
__snake_case =open # noqa: we just need to have a builtin inside this module to test it properly
| 4 |
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def a_ ( lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any]=0 ):
# Format the message.
if name is None:
lowerCAmelCase = None
else:
lowerCAmelCase = '.' * max(0 , spaces - 2 ) + '# {:' + str(50 - spaces ) + 's}'
lowerCAmelCase = fmt.format(lowerCamelCase )
# Print and recurse (if needed).
if isinstance(lowerCamelCase , lowerCamelCase ):
if msg is not None:
print(lowerCamelCase )
for k in val.keys():
recursive_print(lowerCamelCase , val[k] , spaces + 2 )
elif isinstance(lowerCamelCase , torch.Tensor ):
print(lowerCamelCase , ':' , val.size() )
else:
print(lowerCamelCase , ':' , lowerCamelCase )
def a_ ( lowerCamelCase : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Dict , lowerCamelCase : Tuple ):
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
lowerCAmelCase = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
lowerCAmelCase = (num_heads, hidden_size, num_splits) + input_shape[1:]
lowerCAmelCase = param.view(*lowerCamelCase )
lowerCAmelCase = param.transpose(0 , 2 )
lowerCAmelCase = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
lowerCAmelCase = (num_heads, num_splits, hidden_size) + input_shape[1:]
lowerCAmelCase = param.view(*lowerCamelCase )
lowerCAmelCase = param.transpose(0 , 1 ).contiguous()
lowerCAmelCase = param.view(*lowerCamelCase )
return param
def a_ ( lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : str ):
# The converted output model.
lowerCAmelCase = {}
# old versions did not store training args
lowerCAmelCase = input_state_dict.get('args' , lowerCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
lowerCAmelCase = ds_args.padded_vocab_size
lowerCAmelCase = ds_args.max_position_embeddings
lowerCAmelCase = ds_args.hidden_size
lowerCAmelCase = ds_args.num_layers
lowerCAmelCase = ds_args.num_attention_heads
lowerCAmelCase = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
lowerCAmelCase = config.n_head
# The hidden_size per head.
lowerCAmelCase = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
lowerCAmelCase = input_state_dict['checkpoint_version']
else:
lowerCAmelCase = 0.0
# The model.
lowerCAmelCase = input_state_dict['model']
# The language model.
lowerCAmelCase = model['language_model']
# The embeddings.
lowerCAmelCase = lm['embedding']
# The word embeddings.
lowerCAmelCase = embeddings['word_embeddings']['weight']
# Truncate the embedding table to vocab_size rows.
lowerCAmelCase = word_embeddings[: config.vocab_size, :]
lowerCAmelCase = word_embeddings
# The position embeddings.
lowerCAmelCase = embeddings['position_embeddings']['weight']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
lowerCAmelCase = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
lowerCAmelCase = pos_embeddings
# The transformer.
lowerCAmelCase = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder']
# The regex to extract layer names.
lowerCAmelCase = re.compile(R'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' )
# The simple map of names for "automated" rules.
lowerCAmelCase = {
'attention.dense': '.attn.c_proj.',
'self_attention.dense': '.attn.c_proj.',
'mlp.dense_h_to_4h': '.mlp.c_fc.',
'mlp.dense_4h_to_h': '.mlp.c_proj.',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
lowerCAmelCase = layer_re.match(lowerCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
lowerCAmelCase = int(m.group(1 ) )
# The name of the operation.
lowerCAmelCase = m.group(2 )
# Is it a weight or a bias?
lowerCAmelCase = m.group(3 )
# The name of the layer.
lowerCAmelCase = f'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith('layernorm' ):
lowerCAmelCase = 'ln_1' if op_name.startswith('input' ) else 'ln_2'
lowerCAmelCase = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
lowerCAmelCase = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , lowerCamelCase , lowerCamelCase )
lowerCAmelCase = causal_mask
# Insert a "dummy" tensor for masked_bias.
lowerCAmelCase = torch.tensor(-1e4 , dtype=torch.floataa )
lowerCAmelCase = masked_bias
lowerCAmelCase = fix_query_key_value_ordering(lowerCamelCase , lowerCamelCase , 3 , lowerCamelCase , lowerCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
lowerCAmelCase = out_val.transpose(0 , 1 ).contiguous()
# Store.
lowerCAmelCase = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
lowerCAmelCase = fix_query_key_value_ordering(lowerCamelCase , lowerCamelCase , 3 , lowerCamelCase , lowerCamelCase )
# Store. No change of shape.
lowerCAmelCase = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
lowerCAmelCase = megatron_to_transformers[op_name]
lowerCAmelCase = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
lowerCAmelCase = megatron_to_transformers[op_name]
lowerCAmelCase = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
lowerCAmelCase = transformer['final_layernorm.weight']
lowerCAmelCase = transformer['final_layernorm.bias']
# For LM head, transformers' wants the matrix to weight embeddings.
lowerCAmelCase = word_embeddings
# It should be done!
return output_state_dict
def a_ ( ):
# Create the argument parser.
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('--print-checkpoint-structure' , action='store_true' )
parser.add_argument(
'path_to_checkpoint' , type=lowerCamelCase , help='Path to the checkpoint file (.zip archive or direct .pt file)' , )
parser.add_argument(
'--config_file' , default='' , type=lowerCamelCase , help='An optional config json file describing the pre-trained model.' , )
lowerCAmelCase = parser.parse_args()
# Extract the basename.
lowerCAmelCase = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith('.zip' ):
with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint:
with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict:
lowerCAmelCase = torch.load(lowerCamelCase , map_location='cpu' )
else:
lowerCAmelCase = torch.load(args.path_to_checkpoint , map_location='cpu' )
lowerCAmelCase = input_state_dict.get('args' , lowerCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
lowerCAmelCase = 'gelu_fast'
elif ds_args.openai_gelu:
lowerCAmelCase = 'gelu_new'
else:
lowerCAmelCase = 'gelu'
else:
# in the very early days this used to be "gelu_new"
lowerCAmelCase = 'gelu_new'
# Spell out all parameters in case the defaults change.
lowerCAmelCase = GPTaConfig(
vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=lowerCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='cls_index' , summary_use_proj=lowerCamelCase , summary_activation=lowerCamelCase , summary_proj_to_labels=lowerCamelCase , summary_first_dropout=0.1 , scale_attn_weights=lowerCamelCase , use_cache=lowerCamelCase , bos_token_id=50256 , eos_token_id=50256 , )
else:
lowerCAmelCase = GPTaConfig.from_json_file(args.config_file )
lowerCAmelCase = ['GPT2LMHeadModel']
# Convert.
print('Converting' )
lowerCAmelCase = convert_megatron_checkpoint(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(lowerCamelCase , lowerCamelCase )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
lowerCAmelCase = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
lowerCAmelCase = 'gpt2'
elif tokenizer_type == "PretrainedFromHF":
lowerCAmelCase = ds_args.tokenizer_name_or_path
else:
raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
lowerCAmelCase = 'gpt2'
lowerCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase )
lowerCAmelCase = type(lowerCamelCase ).__name__
lowerCAmelCase = tokenizer_class
# Store the config to file.
print('Saving config' )
config.save_pretrained(lowerCamelCase )
# Save tokenizer based on args
print(f'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(lowerCamelCase )
# Store the state_dict to file.
lowerCAmelCase = os.path.join(lowerCamelCase , 'pytorch_model.bin' )
print(f'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(lowerCamelCase , lowerCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 4 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self : str ) -> List[str]:
lowerCAmelCase = XLMRobertaModel.from_pretrained('xlm-roberta-base' )
lowerCAmelCase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
lowerCAmelCase = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim
lowerCAmelCase = torch.tensor(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
lowerCAmelCase = model(UpperCAmelCase__ )['last_hidden_state'].detach()
self.assertEqual(output.shape , UpperCAmelCase__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCAmelCase__ , atol=1E-3 ) )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
lowerCAmelCase = XLMRobertaModel.from_pretrained('xlm-roberta-large' )
lowerCAmelCase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
lowerCAmelCase = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim
lowerCAmelCase = torch.tensor(
[[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
lowerCAmelCase = model(UpperCAmelCase__ )['last_hidden_state'].detach()
self.assertEqual(output.shape , UpperCAmelCase__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCAmelCase__ , atol=1E-3 ) )
| 4 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class UpperCAmelCase_ :
def __init__( self : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 0 ) -> None:
lowerCAmelCase , lowerCAmelCase = row, column
lowerCAmelCase = [[default_value for c in range(UpperCAmelCase__ )] for r in range(UpperCAmelCase__ )]
def __str__( self : List[str] ) -> str:
lowerCAmelCase = F'''Matrix consist of {self.row} rows and {self.column} columns\n'''
# Make string identifier
lowerCAmelCase = 0
for row_vector in self.array:
for obj in row_vector:
lowerCAmelCase = max(UpperCAmelCase__ , len(str(UpperCAmelCase__ ) ) )
lowerCAmelCase = F'''%{max_element_length}s'''
# Make string and return
def single_line(UpperCAmelCase__ : list[float] ) -> str:
nonlocal string_format_identifier
lowerCAmelCase = '['
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(UpperCAmelCase__ ) for row_vector in self.array )
return s
def __repr__( self : List[str] ) -> str:
return str(self )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : tuple[int, int] ) -> bool:
if not (isinstance(UpperCAmelCase__ , (list, tuple) ) and len(UpperCAmelCase__ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Any , UpperCAmelCase__ : tuple[int, int] ) -> Any:
assert self.validate_indicies(UpperCAmelCase__ )
return self.array[loc[0]][loc[1]]
def __setitem__( self : Dict , UpperCAmelCase__ : tuple[int, int] , UpperCAmelCase__ : float ) -> None:
assert self.validate_indicies(UpperCAmelCase__ )
lowerCAmelCase = value
def __add__( self : Any , UpperCAmelCase__ : Matrix ) -> Matrix:
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert self.row == another.row and self.column == another.column
# Add
lowerCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = self[r, c] + another[r, c]
return result
def __neg__( self : int ) -> Matrix:
lowerCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = -self[r, c]
return result
def __sub__( self : str , UpperCAmelCase__ : Matrix ) -> Matrix:
return self + (-another)
def __mul__( self : str , UpperCAmelCase__ : int | float | Matrix ) -> Matrix:
if isinstance(UpperCAmelCase__ , (int, float) ): # Scalar multiplication
lowerCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = self[r, c] * another
return result
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): # Matrix multiplication
assert self.column == another.row
lowerCAmelCase = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
lowerCAmelCase = F'''Unsupported type given for another ({type(UpperCAmelCase__ )})'''
raise TypeError(UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[Any] ) -> Matrix:
lowerCAmelCase = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
lowerCAmelCase = self[r, c]
return result
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : Matrix , UpperCAmelCase__ : Matrix ) -> Any:
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
lowerCAmelCase = v.transpose()
lowerCAmelCase = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def a_ ( ):
# a^(-1)
lowerCAmelCase = Matrix(3 , 3 , 0 )
for i in range(3 ):
lowerCAmelCase = 1
print(f'''a^(-1) is {ainv}''' )
# u, v
lowerCAmelCase = Matrix(3 , 1 , 0 )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1, 2, -3
lowerCAmelCase = Matrix(3 , 1 , 0 )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 4, -2, 5
print(f'''u is {u}''' )
print(f'''v is {v}''' )
print(f'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(lowerCamelCase , lowerCamelCase )}''' )
def a_ ( ):
import doctest
doctest.testmod()
testa()
| 4 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class UpperCAmelCase_ ( __lowercase , __lowercase ):
lowerCamelCase : Union[str, Any] = '''swin'''
lowerCamelCase : Optional[Any] = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : int , UpperCAmelCase__ : List[Any]=2_2_4 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : Union[str, Any]=9_6 , UpperCAmelCase__ : Any=[2, 2, 6, 2] , UpperCAmelCase__ : List[str]=[3, 6, 1_2, 2_4] , UpperCAmelCase__ : List[Any]=7 , UpperCAmelCase__ : Dict=4.0 , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Dict="gelu" , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Optional[Any]=1E-5 , UpperCAmelCase__ : List[str]=3_2 , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : Dict , ) -> int:
super().__init__(**UpperCAmelCase__ )
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = embed_dim
lowerCAmelCase = depths
lowerCAmelCase = len(UpperCAmelCase__ )
lowerCAmelCase = num_heads
lowerCAmelCase = window_size
lowerCAmelCase = mlp_ratio
lowerCAmelCase = qkv_bias
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = drop_path_rate
lowerCAmelCase = hidden_act
lowerCAmelCase = use_absolute_embeddings
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = initializer_range
lowerCAmelCase = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase = int(embed_dim * 2 ** (len(UpperCAmelCase__ ) - 1) )
lowerCAmelCase = ['stem'] + [F'''stage{idx}''' for idx in range(1 , len(UpperCAmelCase__ ) + 1 )]
lowerCAmelCase , lowerCAmelCase = get_aligned_output_features_output_indices(
out_features=UpperCAmelCase__ , out_indices=UpperCAmelCase__ , stage_names=self.stage_names )
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Dict = version.parse('''1.11''' )
@property
def __UpperCAmelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __UpperCAmelCase ( self : Dict ) -> float:
return 1E-4
| 4 |
'''simple docstring'''
class UpperCAmelCase_ :
def __init__( self : List[str] , UpperCAmelCase__ : list[int] ) -> None:
lowerCAmelCase = len(UpperCAmelCase__ )
lowerCAmelCase = [0] * len_array
if len_array > 0:
lowerCAmelCase = array[0]
for i in range(1 , UpperCAmelCase__ ):
lowerCAmelCase = self.prefix_sum[i - 1] + array[i]
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : int ) -> bool:
lowerCAmelCase = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(UpperCAmelCase__ )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case ={
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case =[
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case =[
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case =[
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
__snake_case =_LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 4 |
'''simple docstring'''
def a_ ( lowerCamelCase : Optional[Any] ):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def a_ ( lowerCamelCase : dict[int, list[int]] ):
lowerCAmelCase = 0
lowerCAmelCase = len(lowerCamelCase ) # No of vertices in graph
lowerCAmelCase = [0] * n
lowerCAmelCase = [False] * n
def dfs(lowerCamelCase : Tuple , lowerCamelCase : str , lowerCamelCase : Dict , lowerCamelCase : str ):
lowerCAmelCase = True
lowerCAmelCase = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(lowerCamelCase , lowerCamelCase , lowerCamelCase , id_ )
lowerCAmelCase = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowerCAmelCase = min(low[at] , low[to] )
lowerCAmelCase = []
for i in range(lowerCamelCase ):
if not visited[i]:
dfs(lowerCamelCase , -1 , lowerCamelCase , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 1 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case =logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__snake_case =[]
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def a_ ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : List[str] ):
lowerCAmelCase = state_dict.pop(lowerCamelCase )
lowerCAmelCase = val
def a_ ( lowerCamelCase : Optional[Any] ):
lowerCAmelCase = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
lowerCAmelCase = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
lowerCAmelCase = value
else:
lowerCAmelCase = value
return new_state_dict
def a_ ( lowerCamelCase : Optional[Any] ):
lowerCAmelCase = ''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
lowerCAmelCase = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
lowerCAmelCase = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase = in_proj_weight[:256, :]
lowerCAmelCase = in_proj_bias[:256]
lowerCAmelCase = in_proj_weight[256:512, :]
lowerCAmelCase = in_proj_bias[256:512]
lowerCAmelCase = in_proj_weight[-256:, :]
lowerCAmelCase = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
lowerCAmelCase = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
lowerCAmelCase = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase = in_proj_weight[:256, :]
lowerCAmelCase = in_proj_bias[:256]
lowerCAmelCase = in_proj_weight[256:512, :]
lowerCAmelCase = in_proj_bias[256:512]
lowerCAmelCase = in_proj_weight[-256:, :]
lowerCAmelCase = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
lowerCAmelCase = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
lowerCAmelCase = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
lowerCAmelCase = in_proj_weight_cross_attn[:256, :]
lowerCAmelCase = in_proj_bias_cross_attn[:256]
lowerCAmelCase = in_proj_weight_cross_attn[256:512, :]
lowerCAmelCase = in_proj_bias_cross_attn[256:512]
lowerCAmelCase = in_proj_weight_cross_attn[-256:, :]
lowerCAmelCase = in_proj_bias_cross_attn[-256:]
def a_ ( lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] ):
lowerCAmelCase , lowerCAmelCase = image.size
lowerCAmelCase = max(lowerCamelCase , lowerCamelCase )
lowerCAmelCase = 800 if 'detection' in checkpoint_url else 1000
lowerCAmelCase = target_max_size / current_max_size
lowerCAmelCase = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def a_ ( lowerCamelCase : Optional[Any] ):
lowerCAmelCase = F.to_tensor(lowerCamelCase )
lowerCAmelCase = F.normalize(lowerCamelCase , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def a_ ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] ):
logger.info('Converting model...' )
# load original state dict
lowerCAmelCase = torch.hub.load_state_dict_from_url(lowerCamelCase , map_location='cpu' )
# rename keys
for src, dest in rename_keys:
rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase )
lowerCAmelCase = rename_backbone_keys(lowerCamelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(lowerCamelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowerCAmelCase = 'model.'
for key in state_dict.copy().keys():
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
lowerCAmelCase = state_dict.pop(lowerCamelCase )
lowerCAmelCase = val
# create HuggingFace model and load state dict
lowerCAmelCase = TableTransformerConfig(
backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
lowerCAmelCase = 15
lowerCAmelCase = 2
lowerCAmelCase = {0: 'table', 1: 'table rotated'}
lowerCAmelCase = idalabel
lowerCAmelCase = {v: k for k, v in idalabel.items()}
else:
lowerCAmelCase = 125
lowerCAmelCase = 6
lowerCAmelCase = {
0: 'table',
1: 'table column',
2: 'table row',
3: 'table column header',
4: 'table projected row header',
5: 'table spanning cell',
}
lowerCAmelCase = idalabel
lowerCAmelCase = {v: k for k, v in idalabel.items()}
lowerCAmelCase = DetrImageProcessor(
format='coco_detection' , max_size=800 if 'detection' in checkpoint_url else 1000 )
lowerCAmelCase = TableTransformerForObjectDetection(lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
# verify our conversion
lowerCAmelCase = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
lowerCAmelCase = hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=lowerCamelCase )
lowerCAmelCase = Image.open(lowerCamelCase ).convert('RGB' )
lowerCAmelCase = normalize(resize(lowerCamelCase , lowerCamelCase ) ).unsqueeze(0 )
lowerCAmelCase = model(lowerCamelCase )
if "detection" in checkpoint_url:
lowerCAmelCase = (1, 15, 3)
lowerCAmelCase = torch.tensor(
[[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] )
lowerCAmelCase = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] )
else:
lowerCAmelCase = (1, 125, 7)
lowerCAmelCase = torch.tensor(
[[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] )
lowerCAmelCase = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , lowerCamelCase , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , lowerCamelCase , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
image_processor.save_pretrained(lowerCamelCase )
if push_to_hub:
# Push model to HF hub
logger.info('Pushing model to the hub...' )
lowerCAmelCase = (
'microsoft/table-transformer-detection'
if 'detection' in checkpoint_url
else 'microsoft/table-transformer-structure-recognition'
)
model.push_to_hub(lowerCamelCase )
image_processor.push_to_hub(lowerCamelCase )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__snake_case =parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 4 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case =logging.get_logger(__name__)
def a_ ( lowerCamelCase : Any ):
lowerCAmelCase = OrderedDict()
for key, value in state_dict.items():
if key.startswith('module.encoder' ):
lowerCAmelCase = key.replace('module.encoder' , 'glpn.encoder' )
if key.startswith('module.decoder' ):
lowerCAmelCase = key.replace('module.decoder' , 'decoder.stages' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase = key[key.find('patch_embed' ) + len('patch_embed' )]
lowerCAmelCase = key.replace(f'''patch_embed{idx}''' , f'''patch_embeddings.{int(lowerCamelCase )-1}''' )
if "norm" in key:
lowerCAmelCase = key.replace('norm' , 'layer_norm' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
lowerCAmelCase = key.replace(f'''layer_norm{idx}''' , f'''layer_norm.{int(lowerCamelCase )-1}''' )
if "layer_norm1" in key:
lowerCAmelCase = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
lowerCAmelCase = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase = key[key.find('block' ) + len('block' )]
lowerCAmelCase = key.replace(f'''block{idx}''' , f'''block.{int(lowerCamelCase )-1}''' )
if "attn.q" in key:
lowerCAmelCase = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
lowerCAmelCase = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
lowerCAmelCase = key.replace('attn' , 'attention.self' )
if "fc1" in key:
lowerCAmelCase = key.replace('fc1' , 'dense1' )
if "fc2" in key:
lowerCAmelCase = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
lowerCAmelCase = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
lowerCAmelCase = key.replace('linear_fuse.conv' , 'linear_fuse' )
lowerCAmelCase = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase = key[key.find('linear_c' ) + len('linear_c' )]
lowerCAmelCase = key.replace(f'''linear_c{idx}''' , f'''linear_c.{int(lowerCamelCase )-1}''' )
if "bot_conv" in key:
lowerCAmelCase = key.replace('bot_conv' , '0.convolution' )
if "skip_conv1" in key:
lowerCAmelCase = key.replace('skip_conv1' , '1.convolution' )
if "skip_conv2" in key:
lowerCAmelCase = key.replace('skip_conv2' , '2.convolution' )
if "fusion1" in key:
lowerCAmelCase = key.replace('fusion1' , '1.fusion' )
if "fusion2" in key:
lowerCAmelCase = key.replace('fusion2' , '2.fusion' )
if "fusion3" in key:
lowerCAmelCase = key.replace('fusion3' , '3.fusion' )
if "fusion" in key and "conv" in key:
lowerCAmelCase = key.replace('conv' , 'convolutional_layer' )
if key.startswith('module.last_layer_depth' ):
lowerCAmelCase = key.replace('module.last_layer_depth' , 'head.head' )
lowerCAmelCase = value
return new_state_dict
def a_ ( lowerCamelCase : List[str] , lowerCamelCase : str ):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
lowerCAmelCase = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase = kv_bias[config.hidden_sizes[i] :]
def a_ ( ):
lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return image
@torch.no_grad()
def a_ ( lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any]=False , lowerCamelCase : List[str]=None ):
lowerCAmelCase = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
lowerCAmelCase = GLPNImageProcessor()
# prepare image
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=lowerCamelCase , return_tensors='pt' ).pixel_values
logger.info('Converting model...' )
# load original state dict
lowerCAmelCase = torch.load(lowerCamelCase , map_location=torch.device('cpu' ) )
# rename keys
lowerCAmelCase = rename_keys(lowerCamelCase )
# key and value matrices need special treatment
read_in_k_v(lowerCamelCase , lowerCamelCase )
# create HuggingFace model and load state dict
lowerCAmelCase = GLPNForDepthEstimation(lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
# forward pass
lowerCAmelCase = model(lowerCamelCase )
lowerCAmelCase = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
lowerCAmelCase = torch.tensor(
[[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
elif "kitti" in model_name:
lowerCAmelCase = torch.tensor(
[[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
else:
raise ValueError(f'''Unknown model name: {model_name}''' )
lowerCAmelCase = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , lowerCamelCase , atol=1e-4 )
print('Looks ok!' )
# finally, push to hub if required
if push_to_hub:
logger.info('Pushing model and image processor to the hub...' )
model.push_to_hub(
repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=lowerCamelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCamelCase , lowerCamelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase , )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
__snake_case =parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 4 | 1 |
'''simple docstring'''
from graphs.minimum_spanning_tree_kruskal import kruskal
def a_ ( ):
lowerCAmelCase = 9
lowerCAmelCase = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
lowerCAmelCase = kruskal(lowerCamelCase , lowerCamelCase )
lowerCAmelCase = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(lowerCamelCase ) == sorted(lowerCamelCase )
| 4 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self : str ) -> List[str]:
lowerCAmelCase = XLMRobertaModel.from_pretrained('xlm-roberta-base' )
lowerCAmelCase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
lowerCAmelCase = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim
lowerCAmelCase = torch.tensor(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
lowerCAmelCase = model(UpperCAmelCase__ )['last_hidden_state'].detach()
self.assertEqual(output.shape , UpperCAmelCase__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCAmelCase__ , atol=1E-3 ) )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
lowerCAmelCase = XLMRobertaModel.from_pretrained('xlm-roberta-large' )
lowerCAmelCase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
lowerCAmelCase = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim
lowerCAmelCase = torch.tensor(
[[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
lowerCAmelCase = model(UpperCAmelCase__ )['last_hidden_state'].detach()
self.assertEqual(output.shape , UpperCAmelCase__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCAmelCase__ , atol=1E-3 ) )
| 4 | 1 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class UpperCAmelCase_ :
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict=1_3 , UpperCAmelCase__ : str=7 , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : int=1_9 , UpperCAmelCase__ : str=3_2 , UpperCAmelCase__ : Dict=5 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : Dict=3_7 , UpperCAmelCase__ : List[Any]="gelu" , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : int=5_1_2 , UpperCAmelCase__ : int=1_6 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : int=0.02 , UpperCAmelCase__ : Any=3 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : Optional[int]=None , ) -> Union[str, Any]:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self : List[Any] ) -> int:
lowerCAmelCase = EsmConfig(
vocab_size=3_3 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=UpperCAmelCase__ , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , )
return config
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] ) -> Tuple:
lowerCAmelCase = EsmForProteinFolding(config=UpperCAmelCase__ ).float()
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 1_4, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def __UpperCAmelCase ( self : Dict ) -> Any:
lowerCAmelCase = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) , (
lowerCAmelCase
) ,
) = config_and_inputs
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
lowerCamelCase : str = False
lowerCamelCase : Union[str, Any] = (EsmForProteinFolding,) if is_torch_available() else ()
lowerCamelCase : Union[str, Any] = ()
lowerCamelCase : List[Any] = {} if is_torch_available() else {}
lowerCamelCase : Optional[Any] = False
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
lowerCAmelCase = EsmFoldModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 )
def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Optional[Any] ) -> Any:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
@unittest.skip('Does not support attention outputs' )
def __UpperCAmelCase ( self : Any ) -> Dict:
pass
@unittest.skip
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
pass
@unittest.skip('Esm does not support embedding resizing' )
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
pass
@unittest.skip('Esm does not support embedding resizing' )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
pass
@unittest.skip('ESMFold does not support passing input embeds!' )
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def __UpperCAmelCase ( self : Dict ) -> str:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
pass
@unittest.skip('ESMFold does not output hidden states in the normal way.' )
def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
pass
@unittest.skip('ESMfold does not output hidden states in the normal way.' )
def __UpperCAmelCase ( self : Any ) -> str:
pass
@unittest.skip('ESMFold only has one output format.' )
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
pass
@unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' )
def __UpperCAmelCase ( self : str ) -> Tuple:
pass
@unittest.skip('ESMFold does not support input chunking.' )
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
pass
@unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def __UpperCAmelCase ( self : Any ) -> Tuple:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
pass
@unittest.skip('ESMFold doesn\'t support data parallel.' )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
pass
@require_torch
class UpperCAmelCase_ ( __lowercase ):
@slow
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
lowerCAmelCase = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float()
model.eval()
lowerCAmelCase = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
lowerCAmelCase = model(UpperCAmelCase__ )['positions']
lowerCAmelCase = torch.tensor([2.5_828, 0.7_993, -10.9_334] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , UpperCAmelCase__ , atol=1E-4 ) )
| 4 |
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def a_ ( lowerCamelCase : Dict ):
lowerCAmelCase = {}
lowerCAmelCase = tokenizer(example['content'] , truncation=lowerCamelCase )['input_ids']
lowerCAmelCase = len(example['content'] ) / len(output['input_ids'] )
return output
__snake_case =HfArgumentParser(PretokenizationArguments)
__snake_case =parser.parse_args()
if args.num_workers is None:
__snake_case =multiprocessing.cpu_count()
__snake_case =AutoTokenizer.from_pretrained(args.tokenizer_dir)
__snake_case =time.time()
__snake_case =load_dataset(args.dataset_name, split="""train""")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
__snake_case =time.time()
__snake_case =ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
__snake_case =time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 4 | 1 |
Subsets and Splits