code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
'''simple docstring''' import os def lowerCAmelCase_ ( snake_case__ = "matrix.txt" ): '''simple docstring''' with open(os.path.join(os.path.dirname(snake_case__ ) , snake_case__ ) ) as in_file: A : Dict = in_file.read() A : Union[str, Any] = [[int(snake_case__ ) for cell in row.split(''',''' )] for row in data.strip().splitlines()] A : Union[str, Any] = [[0 for cell in row] for row in grid] A : Any = len(grid[0] ) A : int = [[0 for i in range(snake_case__ )] for j in range(snake_case__ )] A : List[str] = grid[0][0] for i in range(1 , snake_case__ ): A : Any = grid[0][i] + dp[0][i - 1] for i in range(1 , snake_case__ ): A : Dict = grid[i][0] + dp[i - 1][0] for i in range(1 , snake_case__ ): for j in range(1 , snake_case__ ): A : Union[str, Any] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] ) return dp[-1][-1] if __name__ == "__main__": print(f'''{solution() = }''')
3
'''simple docstring''' import os def lowerCAmelCase_ ( ): '''simple docstring''' A : List[Any] = os.path.join(os.path.dirname(snake_case__ ) , '''num.txt''' ) with open(snake_case__ ) as file_hand: return str(sum(int(snake_case__ ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
3
1
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer lowercase : Tuple = logging.get_logger(__name__) lowercase : Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} lowercase : Union[str, Any] = { 'vocab_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt' ), 'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt', 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli': ( 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json' ), }, } lowercase : List[str] = { 'squeezebert/squeezebert-uncased': 5_12, 'squeezebert/squeezebert-mnli': 5_12, 'squeezebert/squeezebert-mnli-headless': 5_12, } lowercase : List[str] = { 'squeezebert/squeezebert-uncased': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True}, } class A ( __snake_case ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_INIT_CONFIGURATION __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = SqueezeBertTokenizer def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE="[UNK]" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="[PAD]" , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[MASK]" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" super().__init__( SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , tokenize_chinese_chars=SCREAMING_SNAKE_CASE , strip_accents=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) A : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , SCREAMING_SNAKE_CASE ) != do_lower_case or normalizer_state.get('''strip_accents''' , SCREAMING_SNAKE_CASE ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars ): A : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE , normalizer_state.pop('''type''' ) ) A : Dict = do_lower_case A : Dict = strip_accents A : Any = tokenize_chinese_chars A : Optional[int] = normalizer_class(**SCREAMING_SNAKE_CASE ) A : str = do_lower_case def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ) -> Optional[Any]: """simple docstring""" A : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : Union[str, Any] = [self.sep_token_id] A : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" A : str = self._tokenizer.model.save(SCREAMING_SNAKE_CASE , name=SCREAMING_SNAKE_CASE ) return tuple(SCREAMING_SNAKE_CASE )
3
'''simple docstring''' import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] ) @pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , '''IN_MEMORY_MAX_SIZE''' , snake_case__ ) A : Dict = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: A : Dict = dataset_size < in_memory_max_size else: A : Tuple = False A : int = is_small_dataset(snake_case__ ) assert result == expected
3
1
'''simple docstring''' import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow lowercase : str = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ 'text-classification', 'language-modeling', 'summarization', 'token-classification', 'question-answering', ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) lowercase : Tuple = logging.getLogger() def lowerCAmelCase_ ( ): '''simple docstring''' A : Tuple = argparse.ArgumentParser() parser.add_argument('''-f''' ) A : str = parser.parse_args() return args.f def lowerCAmelCase_ ( snake_case__ , snake_case__="eval" ): '''simple docstring''' A : str = os.path.join(snake_case__ , F'{split}_results.json' ) if os.path.exists(snake_case__ ): with open(snake_case__ , '''r''' ) as f: return json.load(snake_case__ ) raise ValueError(F'can\'t find {path}' ) lowercase : str = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class A ( __snake_case ): def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : List[Any] = self.get_auto_remove_tmp_dir() A : int = F'\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split() with patch.object(SCREAMING_SNAKE_CASE , '''argv''' , SCREAMING_SNAKE_CASE ): run_flax_glue.main() A : Any = get_results(SCREAMING_SNAKE_CASE ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) @slow def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : str = self.get_auto_remove_tmp_dir() A : Dict = F'\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split() with patch.object(SCREAMING_SNAKE_CASE , '''argv''' , SCREAMING_SNAKE_CASE ): run_clm_flax.main() A : Optional[Any] = get_results(SCREAMING_SNAKE_CASE ) self.assertLess(result['''eval_perplexity'''] , 100 ) @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : int = self.get_auto_remove_tmp_dir() A : Optional[Any] = F'\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n '.split() with patch.object(SCREAMING_SNAKE_CASE , '''argv''' , SCREAMING_SNAKE_CASE ): run_summarization_flax.main() A : Any = get_results(SCREAMING_SNAKE_CASE , split='''test''' ) self.assertGreaterEqual(result['''test_rouge1'''] , 10 ) self.assertGreaterEqual(result['''test_rouge2'''] , 2 ) self.assertGreaterEqual(result['''test_rougeL'''] , 7 ) self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 ) @slow def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : int = self.get_auto_remove_tmp_dir() A : Optional[int] = F'\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n '.split() with patch.object(SCREAMING_SNAKE_CASE , '''argv''' , SCREAMING_SNAKE_CASE ): run_mlm_flax.main() A : str = get_results(SCREAMING_SNAKE_CASE ) self.assertLess(result['''eval_perplexity'''] , 42 ) @slow def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Optional[Any] = self.get_auto_remove_tmp_dir() A : List[Any] = F'\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split() with patch.object(SCREAMING_SNAKE_CASE , '''argv''' , SCREAMING_SNAKE_CASE ): run_ta_mlm_flax.main() A : Optional[Any] = get_results(SCREAMING_SNAKE_CASE ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 ) @slow def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : List[str] = 7 if get_gpu_count() > 1 else 2 A : Optional[int] = self.get_auto_remove_tmp_dir() A : Tuple = F'\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n '.split() with patch.object(SCREAMING_SNAKE_CASE , '''argv''' , SCREAMING_SNAKE_CASE ): run_flax_ner.main() A : str = get_results(SCREAMING_SNAKE_CASE ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertGreaterEqual(result['''eval_f1'''] , 0.3 ) @slow def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Any = self.get_auto_remove_tmp_dir() A : List[str] = F'\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n '.split() with patch.object(SCREAMING_SNAKE_CASE , '''argv''' , SCREAMING_SNAKE_CASE ): run_qa.main() A : List[Any] = get_results(SCREAMING_SNAKE_CASE ) self.assertGreaterEqual(result['''eval_f1'''] , 30 ) self.assertGreaterEqual(result['''eval_exact'''] , 30 )
3
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class A ( __snake_case ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
3
1
'''simple docstring''' import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow lowercase : Optional[Any] = logging.getLogger() @unittest.skip('''Temporarily disable the doc tests.''' ) @require_torch @require_tf @slow class A ( unittest.TestCase ): def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = True , ) -> List[Any]: """simple docstring""" A : List[Any] = [file for file in os.listdir(SCREAMING_SNAKE_CASE ) if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )] if identifier is not None: A : Optional[int] = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): for n_ in n_identifier: A : Union[str, Any] = [file for file in files if n_ not in file] else: A : Any = [file for file in files if n_identifier not in file] A : Union[str, Any] = ignore_files or [] ignore_files.append('''__init__.py''' ) A : int = [file for file in files if file not in ignore_files] for file in files: # Open all files print('''Testing''' , SCREAMING_SNAKE_CASE ) if only_modules: A : List[Any] = file.split('''.''' )[0] try: A : Dict = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Tuple = doctest.DocTestSuite(SCREAMING_SNAKE_CASE ) A : Optional[int] = unittest.TextTestRunner().run(SCREAMING_SNAKE_CASE ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(F'{module_identifier} is not a module.' ) else: A : Optional[int] = doctest.testfile(str('''..''' / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Optional[Any] = Path('''src/transformers''' ) A : Any = '''modeling''' A : Tuple = [ '''modeling_ctrl.py''', '''modeling_tf_ctrl.py''', ] self.analyze_directory(SCREAMING_SNAKE_CASE , identifier=SCREAMING_SNAKE_CASE , ignore_files=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Dict = Path('''src/transformers''' ) A : Optional[Any] = '''tokenization''' self.analyze_directory(SCREAMING_SNAKE_CASE , identifier=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Any = Path('''src/transformers''' ) A : Optional[int] = '''configuration''' self.analyze_directory(SCREAMING_SNAKE_CASE , identifier=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Tuple = Path('''src/transformers''' ) A : Dict = ['''configuration''', '''modeling''', '''tokenization'''] self.analyze_directory(SCREAMING_SNAKE_CASE , n_identifier=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : List[Any] = Path('''docs/source''' ) A : List[str] = ['''favicon.ico'''] self.analyze_directory(SCREAMING_SNAKE_CASE , ignore_files=SCREAMING_SNAKE_CASE , only_modules=SCREAMING_SNAKE_CASE )
3
'''simple docstring''' from scipy.stats import pearsonr import datasets lowercase : Optional[int] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n' lowercase : Optional[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n' lowercase : str = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: """simple docstring""" if return_pvalue: A : Union[str, Any] = pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )}
3
1
'''simple docstring''' import pytest lowercase : Dict = '__dummy_dataset1__' lowercase : Optional[int] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n' @pytest.fixture def lowerCAmelCase_ ( ): '''simple docstring''' return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def lowerCAmelCase_ ( ): '''simple docstring''' return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : Optional[int] = dataset_loading_script_name A : Union[str, Any] = tmp_path / '''datasets''' / script_name script_dir.mkdir(parents=snake_case__ ) A : Union[str, Any] = script_dir / F'{script_name}.py' with open(snake_case__ , '''w''' ) as f: f.write(snake_case__ ) return str(snake_case__ )
3
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) lowercase : Dict = { 'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'], 'processing_speech_to_text': ['Speech2TextProcessor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[Any] = ['Speech2TextTokenizer'] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[str] = ['Speech2TextFeatureExtractor'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Dict = [ 'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFSpeech2TextForConditionalGeneration', 'TFSpeech2TextModel', 'TFSpeech2TextPreTrainedModel', ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Any = [ 'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Speech2TextForConditionalGeneration', 'Speech2TextModel', 'Speech2TextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys lowercase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
3
1
'''simple docstring''' from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging lowercase : Tuple = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''audio_values''', '''audio_mask'''] def __init__( self , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=[16, 16] , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=44100 , SCREAMING_SNAKE_CASE=86 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=0.0 , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" super().__init__( feature_size=SCREAMING_SNAKE_CASE , sampling_rate=SCREAMING_SNAKE_CASE , padding_value=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) A : str = spectrogram_length A : Optional[int] = num_channels A : str = patch_size A : Optional[Any] = feature_size // self.patch_size[1] A : str = n_fft A : Dict = sampling_rate // hop_length_to_sampling_rate A : Optional[int] = sampling_rate A : int = padding_value A : Union[str, Any] = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=SCREAMING_SNAKE_CASE , norm='''slaney''' , mel_scale='''slaney''' , ).T def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> np.ndarray: """simple docstring""" A : Dict = spectrogram( SCREAMING_SNAKE_CASE , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , ) A : int = log_spec[:, :-1] A : Dict = log_spec - 20.0 A : List[Any] = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , **SCREAMING_SNAKE_CASE , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( '''This feature extractor is set to support sampling rate''' F' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled' F' with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) A : List[str] = isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}' ) A : str = is_batched_numpy or ( isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A : Tuple = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ): A : int = np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A : List[str] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A : List[Any] = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis A : Optional[Any] = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , SCREAMING_SNAKE_CASE ): A : str = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask A : Optional[int] = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: A : List[Any] = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] A : List[Any] = np.array(SCREAMING_SNAKE_CASE ).astype(np.floataa ) # convert into correct format for padding A : Tuple = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch A : List[str] = np.ones([len(SCREAMING_SNAKE_CASE ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) A : int = padded_audio_features * self.padding_value for i in range(len(SCREAMING_SNAKE_CASE ) ): A : Optional[Any] = audio_features[i] A : str = feature # return as BatchFeature if return_attention_mask: A : int = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask} else: A : List[str] = {'''audio_values''': padded_audio_features} A : Optional[int] = BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE ) return encoded_inputs
3
'''simple docstring''' import os import sys import unittest lowercase : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) lowercase : Any = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py') lowercase : Optional[int] = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py') class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Any = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : List[Any] = {'''BertModelTest''': '''BertModelTester'''} A : int = { '''BlipModelTest''': '''BlipModelTester''', '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''', '''BlipTextModelTest''': '''BlipTextModelTester''', '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''', '''BlipVQAModelTest''': '''BlipVQAModelTester''', '''BlipVisionModelTest''': '''BlipVisionModelTester''', } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : List[str] = { '''BertForMaskedLM''': ['''BertModelTest'''], '''BertForMultipleChoice''': ['''BertModelTest'''], '''BertForNextSentencePrediction''': ['''BertModelTest'''], '''BertForPreTraining''': ['''BertModelTest'''], '''BertForQuestionAnswering''': ['''BertModelTest'''], '''BertForSequenceClassification''': ['''BertModelTest'''], '''BertForTokenClassification''': ['''BertModelTest'''], '''BertLMHeadModel''': ['''BertModelTest'''], '''BertModel''': ['''BertModelTest'''], } A : Union[str, Any] = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''], '''BlipModel''': ['''BlipModelTest'''], '''BlipTextModel''': ['''BlipTextModelTest'''], '''BlipVisionModel''': ['''BlipVisionModelTest'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : int = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Dict = { '''BertForMaskedLM''': ['''BertModelTester'''], '''BertForMultipleChoice''': ['''BertModelTester'''], '''BertForNextSentencePrediction''': ['''BertModelTester'''], '''BertForPreTraining''': ['''BertModelTester'''], '''BertForQuestionAnswering''': ['''BertModelTester'''], '''BertForSequenceClassification''': ['''BertModelTester'''], '''BertForTokenClassification''': ['''BertModelTester'''], '''BertLMHeadModel''': ['''BertModelTester'''], '''BertModel''': ['''BertModelTester'''], } A : str = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''], '''BlipModel''': ['''BlipModelTester'''], '''BlipTextModel''': ['''BlipTextModelTester'''], '''BlipVisionModel''': ['''BlipVisionModelTester'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
3
1
'''simple docstring''' import requests from bsa import BeautifulSoup def lowerCAmelCase_ ( snake_case__ = "https://www.worldometers.info/coronavirus" ): '''simple docstring''' A : Optional[Any] = BeautifulSoup(requests.get(snake_case__ ).text , '''html.parser''' ) A : Any = soup.findAll('''h1''' ) A : Union[str, Any] = soup.findAll('''div''' , {'''class''': '''maincounter-number'''} ) keys += soup.findAll('''span''' , {'''class''': '''panel-title'''} ) values += soup.findAll('''div''' , {'''class''': '''number-table-main'''} ) return {key.text.strip(): value.text.strip() for key, value in zip(snake_case__ , snake_case__ )} if __name__ == "__main__": print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n') for key, value in world_covidaa_stats().items(): print(f'''{key}\n{value}\n''')
3
'''simple docstring''' from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class A ( __snake_case ): __magic_name__ = DistilBertTokenizer __magic_name__ = DistilBertTokenizerFast __magic_name__ = True @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : List[Any] = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' ) A : Dict = tokenizer.encode('''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE ) A : List[str] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE ) A : Optional[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE ) A : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
3
1
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class A ( unittest.TestCase ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=18 , SCREAMING_SNAKE_CASE=30 , SCREAMING_SNAKE_CASE=400 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , ) -> Union[str, Any]: """simple docstring""" A : int = size if size is not None else {'''height''': 18, '''width''': 18} A : List[str] = parent A : str = batch_size A : List[Any] = num_channels A : Optional[int] = image_size A : Optional[int] = min_resolution A : Dict = max_resolution A : Optional[int] = do_resize A : List[Any] = size A : Union[str, Any] = apply_ocr def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class A ( __snake_case , unittest.TestCase ): __magic_name__ = LayoutLMvaImageProcessor if is_pytesseract_available() else None def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Tuple = LayoutLMvaImageProcessingTester(self ) @property def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , '''do_resize''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , '''size''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , '''apply_ocr''' ) ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Dict = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} ) A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" pass def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input A : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) self.assertIsInstance(encoding.words , SCREAMING_SNAKE_CASE ) self.assertIsInstance(encoding.boxes , SCREAMING_SNAKE_CASE ) # Test batched A : str = image_processing(SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , numpify=SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input A : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched A : Union[str, Any] = image_processing(SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , torchify=SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched A : int = image_processing(SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : List[Any] = LayoutLMvaImageProcessor() from datasets import load_dataset A : Any = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' ) A : str = Image.open(ds[0]['''file'''] ).convert('''RGB''' ) A : Dict = image_processing(SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 A : List[Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231 A : Optional[Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , SCREAMING_SNAKE_CASE ) self.assertListEqual(encoding.boxes , SCREAMING_SNAKE_CASE ) # with apply_OCR = False A : Any = LayoutLMvaImageProcessor(apply_ocr=SCREAMING_SNAKE_CASE ) A : int = image_processing(SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
3
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase : Optional[int] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''input_features''', '''attention_mask'''] def __init__( self , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=16000 , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" super().__init__(feature_size=SCREAMING_SNAKE_CASE , sampling_rate=SCREAMING_SNAKE_CASE , padding_value=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = num_mel_bins A : Tuple = do_ceptral_normalize A : Dict = normalize_means A : List[Any] = normalize_vars A : List[str] = True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : List[Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers A : Any = torch.from_numpy(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) A : Any = ta_kaldi.fbank(SCREAMING_SNAKE_CASE , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 0.0 , ) -> np.ndarray: """simple docstring""" if normalize_means: A : Dict = x[:input_length].mean(axis=0 ) A : Optional[Any] = np.subtract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if normalize_vars: A : str = x[:input_length].std(axis=0 ) A : int = np.divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if input_length < x.shape[0]: A : List[str] = padding_value # make sure array is in float32 A : Tuple = x.astype(np.floataa ) return x def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[np.ndarray]: """simple docstring""" A : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' F' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) A : List[Any] = isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}' ) A : Tuple = is_batched_numpy or ( isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ): A : Union[str, Any] = np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A : Any = [raw_speech] # extract fbank features A : List[str] = [self._extract_fbank_features(SCREAMING_SNAKE_CASE ) for waveform in raw_speech] # convert into correct format for padding A : str = BatchFeature({'''input_features''': features} ) A : Union[str, Any] = self.pad( SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) # make sure list is in array format A : List[str] = padded_inputs.get('''input_features''' ) if isinstance(input_features[0] , SCREAMING_SNAKE_CASE ): A : str = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features] A : Union[str, Any] = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: A : Dict = ( np.array(SCREAMING_SNAKE_CASE , dtype=np.intaa ) if self._get_padding_strategies(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE ) is not PaddingStrategy.DO_NOT_PAD else None ) A : List[Any] = self.normalize( padded_inputs['''input_features'''] , attention_mask=SCREAMING_SNAKE_CASE ) if return_tensors is not None: A : int = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE ) return padded_inputs
3
1
'''simple docstring''' import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def lowerCAmelCase_ ( snake_case__="" ): '''simple docstring''' A : Optional[Any] = tempfile.mkdtemp() return os.path.join(snake_case__ , str(uuid.uuida() ) + suffix ) @require_soundfile @require_torch class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : str = torch.rand(12 , dtype=torch.floataa ) - 0.5 A : int = AgentAudio(SCREAMING_SNAKE_CASE ) A : List[Any] = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , agent_type.to_raw() , atol=1e-4 ) ) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE ) ) # Ensure that the file contains the same value as the original tensor A, A : Any = sf.read(SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , torch.tensor(SCREAMING_SNAKE_CASE ) , atol=1e-4 ) ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = torch.rand(12 , dtype=torch.floataa ) - 0.5 A : int = get_new_path(suffix='''.wav''' ) sf.write(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 16000 ) A : Union[str, Any] = AgentAudio(SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , agent_type.to_raw() , atol=1e-4 ) ) self.assertEqual(agent_type.to_string() , SCREAMING_SNAKE_CASE ) @require_vision @require_torch class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : int = torch.randint(0 , 256 , (64, 64, 3) ) A : List[Any] = AgentImage(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , agent_type._tensor , atol=1e-4 ) ) self.assertIsInstance(agent_type.to_raw() , Image.Image ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : List[Any] = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' A : Dict = Image.open(SCREAMING_SNAKE_CASE ) A : int = AgentImage(SCREAMING_SNAKE_CASE ) self.assertTrue(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Any = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' A : List[str] = Image.open(SCREAMING_SNAKE_CASE ) A : List[str] = AgentImage(SCREAMING_SNAKE_CASE ) self.assertFalse(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE ) ) class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Dict = '''Hey!''' A : Any = AgentText(SCREAMING_SNAKE_CASE ) self.assertEqual(SCREAMING_SNAKE_CASE , agent_type.to_string() ) self.assertEqual(SCREAMING_SNAKE_CASE , agent_type.to_raw() ) self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
3
'''simple docstring''' import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 lowercase : str = get_tests_dir('fixtures/dummy_feature_extractor_config.json') lowercase : str = get_tests_dir('fixtures/vocab.json') lowercase : int = get_tests_dir('fixtures') class A ( unittest.TestCase ): __magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = 0 def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : List[Any] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : Union[str, Any] = WavaVecaConfig() A : List[str] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) # save in new folder model_config.save_pretrained(SCREAMING_SNAKE_CASE ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) ) A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : Dict = WavaVecaFeatureExtractor() A : List[str] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(SCREAMING_SNAKE_CASE ) # drop `processor_class` in tokenizer with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f: A : Dict = json.load(SCREAMING_SNAKE_CASE ) config_dict.pop('''processor_class''' ) with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE ) ) A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : List[Any] = WavaVecaFeatureExtractor() A : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(SCREAMING_SNAKE_CASE ) # drop `processor_class` in feature extractor with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f: A : str = json.load(SCREAMING_SNAKE_CASE ) config_dict.pop('''processor_class''' ) with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE ) ) A : str = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : str = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' ) model_config.save_pretrained(SCREAMING_SNAKE_CASE ) # copy relevant files copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) ) # create emtpy sample processor with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write('''{}''' ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" with self.assertRaises(SCREAMING_SNAKE_CASE ): A : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(SCREAMING_SNAKE_CASE ): A : Union[str, Any] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) A : List[str] = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) A : Tuple = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version A : List[str] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE ) A : int = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(SCREAMING_SNAKE_CASE ): AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Now that the config is registered, it can be used as any other config with the auto-API A : List[Any] = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: A : Tuple = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : Optional[int] = CustomTokenizer(SCREAMING_SNAKE_CASE ) A : Any = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" class A ( __snake_case ): __magic_name__ = False class A ( __snake_case ): __magic_name__ = False class A ( __snake_case ): __magic_name__ = '''AutoFeatureExtractor''' __magic_name__ = '''AutoTokenizer''' __magic_name__ = False try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # If remote code is not set, the default is to use local classes. A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. A : Optional[int] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. A : Tuple = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : int = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Optional[int] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' ) self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' ) @is_staging_test class A ( unittest.TestCase ): __magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def __lowerCAmelCase ( cls ) -> Dict: """simple docstring""" A : Optional[int] = TOKEN HfFolder.save_token(SCREAMING_SNAKE_CASE ) @classmethod def __lowerCAmelCase ( cls ) -> Any: """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' ) except HTTPError: pass def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Union[str, Any] = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(SCREAMING_SNAKE_CASE , '''test-processor''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token ) A : int = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Tuple = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(SCREAMING_SNAKE_CASE , '''test-processor-org''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token , organization='''valid_org''' , ) A : int = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() A : Any = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: A : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : str = CustomTokenizer(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(F'{USER}/test-dynamic-processor' , token=self._token ) A : List[str] = Repository(SCREAMING_SNAKE_CASE , clone_from=F'{USER}/test-dynamic-processor' , token=self._token ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { '''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''', '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) ) as f: A : Dict = json.load(SCREAMING_SNAKE_CASE ) self.assertDictEqual( tokenizer_config['''auto_map'''] , { '''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None], '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_feature_extraction.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_tokenization.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_processing.py''' ) ) ) repo.push_to_hub() A : Optional[int] = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' , trust_remote_code=SCREAMING_SNAKE_CASE ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
3
1
'''simple docstring''' from __future__ import annotations from collections.abc import Iterator class A : def __init__( self , SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" A : Optional[Any] = value A : Node | None = None A : Node | None = None class A : def __init__( self , SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" A : str = tree def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" if node is None: return 0 return node.value + ( self.depth_first_search(node.left ) + self.depth_first_search(node.right ) ) def __iter__( self ) -> Iterator[int]: """simple docstring""" yield self.depth_first_search(self.tree ) if __name__ == "__main__": import doctest doctest.testmod()
3
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: lowercase : Optional[Any] = None lowercase : Tuple = logging.get_logger(__name__) lowercase : Dict = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'} lowercase : Tuple = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, 'tokenizer_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json', }, } lowercase : List[str] = { 'google/rembert': 2_56, } lowercase : Dict = '▁' class A ( __snake_case ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = RemBertTokenizer def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[MASK]" , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" A : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token super().__init__( SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) A : List[Any] = do_lower_case A : str = remove_space A : int = keep_accents A : Union[str, Any] = vocab_file A : List[Any] = False if not self.vocab_file else True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : List[Any] = [self.sep_token_id] A : Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : Tuple = [self.sep_token_id] A : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error('''Vocabulary path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE ) ) return A : Any = os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
3
1
'''simple docstring''' from math import factorial def lowerCAmelCase_ ( snake_case__ = 20 ): '''simple docstring''' A : Optional[Any] = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... A : List[str] = n // 2 return int(factorial(snake_case__ ) / (factorial(snake_case__ ) * factorial(n - k )) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution(20)) else: try: lowercase : List[str] = int(sys.argv[1]) print(solution(n)) except ValueError: print('Invalid entry - please enter a number.')
3
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowercase : Optional[Any] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''pixel_values'''] def __init__( self , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 1 / 255 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) A : str = size if size is not None else {'''shortest_edge''': 384} A : Tuple = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : str = do_resize A : List[Any] = size # Default value set here for backwards compatibility where the value in config is None A : List[Any] = crop_pct if crop_pct is not None else 224 / 256 A : Optional[int] = resample A : Union[str, Any] = do_rescale A : List[str] = rescale_factor A : Union[str, Any] = do_normalize A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : str = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) if "shortest_edge" not in size: raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' ) A : Any = size['''shortest_edge'''] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct A : Dict = int(shortest_edge / crop_pct ) A : str = get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : int = resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) else: # warping (no cropping) when evaluated at 384 or larger return resize( SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE , ) -> PIL.Image.Image: """simple docstring""" A : int = do_resize if do_resize is not None else self.do_resize A : Tuple = crop_pct if crop_pct is not None else self.crop_pct A : Optional[Any] = resample if resample is not None else self.resample A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale A : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor A : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean A : List[str] = image_std if image_std is not None else self.image_std A : Union[str, Any] = size if size is not None else self.size A : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : Any = make_list_of_images(SCREAMING_SNAKE_CASE ) if not valid_images(SCREAMING_SNAKE_CASE ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError('''crop_pct must be specified if size < 384.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. A : Optional[int] = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images] if do_resize: A : Any = [self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , crop_pct=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: A : str = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images] if do_normalize: A : Dict = [self.normalize(image=SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE ) for image in images] A : Any = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images] A : Optional[int] = {'''pixel_values''': images} return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
3
1
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging lowercase : List[str] = logging.get_logger(__name__) lowercase : str = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class A ( __snake_case ): __magic_name__ = '''gpt_neo''' __magic_name__ = ['''past_key_values'''] __magic_name__ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , SCREAMING_SNAKE_CASE=50257 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=24 , SCREAMING_SNAKE_CASE=[[["global", "local"], 12]] , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=50256 , SCREAMING_SNAKE_CASE=50256 , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" A : Union[str, Any] = vocab_size A : Optional[Any] = max_position_embeddings A : Dict = hidden_size A : Optional[Any] = num_layers A : Tuple = num_heads A : int = intermediate_size A : Optional[Any] = window_size A : List[Any] = activation_function A : Union[str, Any] = resid_dropout A : Any = embed_dropout A : List[Any] = attention_dropout A : str = classifier_dropout A : List[Any] = layer_norm_epsilon A : str = initializer_range A : List[str] = use_cache A : Optional[int] = bos_token_id A : List[Any] = eos_token_id A : int = attention_types A : int = self.expand_attention_types_params(SCREAMING_SNAKE_CASE ) if len(self.attention_layers ) != self.num_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.attention_layers)` == `config.num_layers` ''' F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, ' F'`config.num_layers = {self.num_layers}`. ' '''`config.attention_layers` is prepared using `config.attention_types`. ''' '''Please verify the value of `config.attention_types` argument.''' ) super().__init__(bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : List[str] = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' import torch A : Tuple = input.size() A : Union[str, Any] = len(snake_case__ ) A : List[str] = shape[dimension] A : Union[str, Any] = torch.arange(0 , snake_case__ , snake_case__ ) A : List[str] = torch.div(sizedim - size , snake_case__ , rounding_mode='''floor''' ) + 1 A : Optional[int] = torch.arange(snake_case__ ) + low_indices[:min_length][:, None] A : str = [slice(snake_case__ )] * rank A : List[Any] = indices A : Union[str, Any] = input[s] A : List[str] = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch A : List[str] = torch.arange(1 , snake_case__ ) A : Optional[int] = torch.remainder(snake_case__ , snake_case__ ) A : Optional[int] = remainders == 0 A : Optional[Any] = candidates[divisor_indices] A : Optional[int] = torch.max(snake_case__ ) return largest_divisor, torch.div(snake_case__ , snake_case__ , rounding_mode='''floor''' ) class A ( __snake_case ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" A : Tuple = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE , direction='''inputs''' ) A : Optional[Any] = {0: '''batch''', 1: '''past_sequence + sequence'''} else: A : Dict = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return self._config.num_heads def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]: """simple docstring""" A : List[str] = super(SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs( SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE ) # We need to order the input in the way they appears in the forward() A : Any = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch A, A : Dict = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values A : str = seqlen + 2 A : List[Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) A : Any = [ (torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers ) ] A : str = common_inputs['''attention_mask'''] if self.use_past: A : Optional[int] = ordered_inputs['''attention_mask'''].dtype A : List[str] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )] , dim=1 ) return ordered_inputs @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return 13
3
'''simple docstring''' import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(SCREAMING_SNAKE_CASE ): A : int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[str] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> int: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: with self.subTest(SCREAMING_SNAKE_CASE ): A : Any = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Any = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: A : Optional[int] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = FlaxBertModel.from_pretrained(SCREAMING_SNAKE_CASE ) A : Optional[Any] = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() @slow def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: A : List[str] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE ) A : int = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''bert-base is not a local folder and is not a valid model identifier''' ): A : List[Any] = FlaxAutoModel.from_pretrained('''bert-base''' ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): A : Optional[int] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE , revision='''aaaaaa''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ): A : List[str] = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" with self.assertRaisesRegex(SCREAMING_SNAKE_CASE , '''Use `from_pt=True` to load this model''' ): A : Any = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
3
1
'''simple docstring''' def lowerCAmelCase_ ( snake_case__ = 100 ): '''simple docstring''' A : List[str] = 0 A : List[str] = 0 for i in range(1 , n + 1 ): sum_of_squares += i**2 sum_of_ints += i return sum_of_ints**2 - sum_of_squares if __name__ == "__main__": print(f'''{solution() = }''')
3
'''simple docstring''' import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowercase : Union[str, Any] = { 'text_branch': 'text_model', 'audio_branch': 'audio_model.audio_encoder', 'attn': 'attention.self', 'self.proj': 'output.dense', 'attention.self_mask': 'attn_mask', 'mlp.fc1': 'intermediate.dense', 'mlp.fc2': 'output.dense', 'norm1': 'layernorm_before', 'norm2': 'layernorm_after', 'bn0': 'batch_norm', } lowercase : Tuple = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc') def lowerCAmelCase_ ( snake_case__ , snake_case__=False ): '''simple docstring''' A, A : Tuple = create_model( '''HTSAT-tiny''' , '''roberta''' , snake_case__ , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=snake_case__ , fusion_type='''aff_2d''' if enable_fusion else None , ) return model, model_cfg def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Dict = {} A : str = R'''.*sequential.(\d+).*''' A : Union[str, Any] = R'''.*_projection.(\d+).*''' for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: A : Any = key.replace(snake_case__ , snake_case__ ) if re.match(snake_case__ , snake_case__ ): # replace sequential layers with list A : Any = re.match(snake_case__ , snake_case__ ).group(1 ) A : List[str] = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(snake_case__ )//3}.linear.' ) elif re.match(snake_case__ , snake_case__ ): A : Union[str, Any] = int(re.match(snake_case__ , snake_case__ ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... A : str = 1 if projecton_layer == 0 else 2 A : Optional[Any] = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' ) if "audio" and "qkv" in key: # split qkv into query key and value A : int = value A : List[Any] = mixed_qkv.size(0 ) // 3 A : Union[str, Any] = mixed_qkv[:qkv_dim] A : Optional[int] = mixed_qkv[qkv_dim : qkv_dim * 2] A : Optional[int] = mixed_qkv[qkv_dim * 2 :] A : Tuple = query_layer A : Union[str, Any] = key_layer A : Optional[int] = value_layer else: A : Dict = value return model_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' A, A : int = init_clap(snake_case__ , enable_fusion=snake_case__ ) clap_model.eval() A : str = clap_model.state_dict() A : Union[str, Any] = rename_state_dict(snake_case__ ) A : Tuple = ClapConfig() A : str = enable_fusion A : str = ClapModel(snake_case__ ) # ignore the spectrogram embedding layer model.load_state_dict(snake_case__ , strict=snake_case__ ) model.save_pretrained(snake_case__ ) transformers_config.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase : List[str] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not') lowercase : Tuple = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
3
1
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class A ( __snake_case ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = None class A ( __snake_case , __snake_case ): __magic_name__ = 2 @register_to_config def __init__( self , SCREAMING_SNAKE_CASE = 0.02 , SCREAMING_SNAKE_CASE = 100 , SCREAMING_SNAKE_CASE = 1.007 , SCREAMING_SNAKE_CASE = 80 , SCREAMING_SNAKE_CASE = 0.05 , SCREAMING_SNAKE_CASE = 50 , ) -> Union[str, Any]: """simple docstring""" A : Optional[Any] = sigma_max # setable values A : int = None A : np.IntTensor = None A : torch.FloatTensor = None # sigma(t_i) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> torch.FloatTensor: """simple docstring""" return sample def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> str: """simple docstring""" A : Optional[Any] = num_inference_steps A : Any = np.arange(0 , self.num_inference_steps )[::-1].copy() A : int = torch.from_numpy(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ) A : List[Any] = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] A : Dict = torch.tensor(SCREAMING_SNAKE_CASE , dtype=torch.floataa , device=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[torch.FloatTensor, float]: """simple docstring""" if self.config.s_min <= sigma <= self.config.s_max: A : int = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: A : Optional[int] = 0 # sample eps ~ N(0, S_noise^2 * I) A : Any = self.config.s_noise * randn_tensor(sample.shape , generator=SCREAMING_SNAKE_CASE ).to(sample.device ) A : Union[str, Any] = sigma + gamma * sigma A : Dict = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True , ) -> Union[KarrasVeOutput, Tuple]: """simple docstring""" A : Optional[Any] = sample_hat + sigma_hat * model_output A : Optional[int] = (sample_hat - pred_original_sample) / sigma_hat A : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=SCREAMING_SNAKE_CASE , derivative=SCREAMING_SNAKE_CASE , pred_original_sample=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True , ) -> Union[KarrasVeOutput, Tuple]: """simple docstring""" A : Tuple = sample_prev + sigma_prev * model_output A : Union[str, Any] = (sample_prev - pred_original_sample) / sigma_prev A : List[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=SCREAMING_SNAKE_CASE , derivative=SCREAMING_SNAKE_CASE , pred_original_sample=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" raise NotImplementedError()
3
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging lowercase : Dict = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise if not is_sharded: A : Union[str, Any] = os.path.abspath(snake_case__ ) logger.info(F'Loading PyTorch weights from {pt_path}' ) A : Any = torch.load(snake_case__ , map_location='''cpu''' ) logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' ) A : List[str] = convert_pytorch_state_dict_to_flax(snake_case__ , snake_case__ ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files A : Any = convert_pytorch_sharded_state_dict_to_flax(snake_case__ , snake_case__ ) return flax_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' def is_key_or_prefix_key_in_dict(snake_case__ ) -> bool: return len(set(snake_case__ ) & {key, (model_prefix,) + key} ) > 0 # layer norm A : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean A : Tuple = pt_tuple_key[:-1] + ('''mean''',) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var A : Dict = pt_tuple_key[:-1] + ('''var''',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # embedding A : Any = pt_tuple_key[:-1] + ('''embedding''',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # conv layer A : Optional[Any] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(snake_case__ ): A : List[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer A : Optional[int] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(snake_case__ ): A : str = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight A : Dict = pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias A : List[Any] = pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 A : Dict = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): A : List[Any] = pt_tuple_key[-2] + '''_g''' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): A : List[str] = pt_tuple_key[-2] + '''_v''' if name is not None: A : int = pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = {k: v.numpy() for k, v in pt_state_dict.items()} A : int = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: A : List[str] = flax_model.params['''params'''] else: A : Dict = flax_model.params A : List[Any] = flatten_dict(snake_case__ ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : List[str] = flatten_dict(flax_model.params['''batch_stats'''] ) random_flax_state_dict.update(snake_case__ ) A : int = {} A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : int = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : str = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : Union[str, Any] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Any = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Dict = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : Any = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: A : Tuple = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : List[str] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch # Load the index A : Union[str, Any] = {} for shard_file in shard_filenames: # load using msgpack utils A : List[str] = torch.load(snake_case__ ) A : int = {k: v.numpy() for k, v in pt_state_dict.items()} A : Tuple = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : Optional[int] = flax_model.params['''params'''] A : List[Any] = flatten_dict(snake_case__ ) random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) ) else: A : Dict = flax_model.params A : Tuple = flatten_dict(snake_case__ ) A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : List[str] = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : int = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : List[str] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Union[str, Any] = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Any = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : int = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue if "var" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = os.path.abspath(snake_case__ ) logger.info(F'Loading Flax weights from {flax_checkpoint_path}' ) # import correct flax class A : List[str] = getattr(snake_case__ , '''Flax''' + model.__class__.__name__ ) # load flax weight dict with open(snake_case__ , '''rb''' ) as state_f: try: A : int = from_bytes(snake_case__ , state_f.read() ) except UnpicklingError: raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights A : List[str] = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values() if any(snake_case__ ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) A : Optional[Any] = jax.tree_util.tree_map( lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ ) A : Union[str, Any] = flatten_dict(snake_case__ ) A : List[Any] = pt_model.state_dict() A : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) A : Tuple = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys A : int = [] A : Any = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): A : Union[str, Any] = flax_key_tuple[0] == pt_model.base_model_prefix A : int = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: A : List[str] = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: A : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case__ ) not in pt_model_dict: # conv layer A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) A : Optional[int] = jnp.transpose(snake_case__ , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ) not in pt_model_dict: # linear layer A : Tuple = flax_key_tuple[:-1] + ('''weight''',) A : Tuple = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_mean''',) elif "var" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_var''',) if "batch_stats" in flax_state: A : List[Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: A : Union[str, Any] = '''.'''.join(snake_case__ ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. A : int = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: A : Optional[int] = key.split('''.''' ) A : Dict = None if key_components[-3::2] == ["parametrizations", "original0"]: A : List[str] = key_components[-2] + '''_g''' elif key_components[-3::2] == ["parametrizations", "original1"]: A : List[Any] = key_components[-2] + '''_v''' if name is not None: A : str = key_components[:-3] + [name] A : Optional[Any] = '''.'''.join(snake_case__ ) A : Optional[Any] = key if flax_key in special_pt_names: A : Optional[Any] = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ' F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) else: # add weight to pytorch dict A : Dict = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor A : Dict = torch.from_numpy(snake_case__ ) # remove from missing keys missing_keys.remove(snake_case__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(snake_case__ ) pt_model.load_state_dict(snake_case__ ) # re-transform missing_keys to list A : List[Any] = list(snake_case__ ) if len(snake_case__ ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) else: logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' ) if len(snake_case__ ) > 0: logger.warning( F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' ''' use it for predictions and inference.''' ) else: logger.warning( F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n' '''If your task is similar to the task the model of the checkpoint was trained on, ''' F'you can already use {pt_model.__class__.__name__} for predictions without further training.' ) return pt_model
3
1
'''simple docstring''' import os import random import sys from . import cryptomath_module as cryptomath from . import rabin_miller lowercase : Union[str, Any] = 3 def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' print('''Generating primitive root of p''' ) while True: A : Optional[Any] = random.randrange(3 , snake_case__ ) if pow(snake_case__ , 2 , snake_case__ ) == 1: continue if pow(snake_case__ , snake_case__ , snake_case__ ) == 1: continue return g def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' print('''Generating prime p...''' ) A : int = rabin_miller.generate_large_prime(snake_case__ ) # select large prime number. A : List[str] = primitive_root(snake_case__ ) # one primitive root on modulo p. A : Union[str, Any] = random.randrange(3 , snake_case__ ) # private_key -> have to be greater than 2 for safety. A : str = cryptomath.find_mod_inverse(pow(snake_case__ , snake_case__ , snake_case__ ) , snake_case__ ) A : Dict = (key_size, e_a, e_a, p) A : Optional[int] = (key_size, d) return public_key, private_key def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' if os.path.exists(F'{name}_pubkey.txt' ) or os.path.exists(F'{name}_privkey.txt' ): print('''\nWARNING:''' ) print( F'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n' '''Use a different name or delete these files and re-run this program.''' ) sys.exit() A, A : int = generate_key(snake_case__ ) print(F'\nWriting public key to file {name}_pubkey.txt...' ) with open(F'{name}_pubkey.txt' , '''w''' ) as fo: fo.write(F'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}' ) print(F'Writing private key to file {name}_privkey.txt...' ) with open(F'{name}_privkey.txt' , '''w''' ) as fo: fo.write(F'{private_key[0]},{private_key[1]}' ) def lowerCAmelCase_ ( ): '''simple docstring''' print('''Making key files...''' ) make_key_files('''elgamal''' , 2048 ) print('''Key files generation successful''' ) if __name__ == "__main__": main()
3
'''simple docstring''' import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint lowercase : Optional[int] = { '169M': 12, '430M': 24, '1B5': 24, '3B': 32, '7B': 32, '14B': 40, } lowercase : Optional[Any] = { '169M': 7_68, '430M': 10_24, '1B5': 20_48, '3B': 25_60, '7B': 40_96, '14B': 51_20, } def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Optional[Any] = list(state_dict.keys() ) for name in state_dict_keys: A : str = state_dict.pop(snake_case__ ) # emb -> embedding if name.startswith('''emb.''' ): A : Optional[Any] = name.replace('''emb.''' , '''embeddings.''' ) # ln_0 -> pre_ln (only present at block 0) if name.startswith('''blocks.0.ln0''' ): A : Union[str, Any] = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' ) # att -> attention A : int = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , snake_case__ ) # ffn -> feed_forward A : List[Any] = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , snake_case__ ) # time_mix_k -> time_mix_key and reshape if name.endswith('''.time_mix_k''' ): A : List[str] = name.replace('''.time_mix_k''' , '''.time_mix_key''' ) # time_mix_v -> time_mix_value and reshape if name.endswith('''.time_mix_v''' ): A : Union[str, Any] = name.replace('''.time_mix_v''' , '''.time_mix_value''' ) # time_mix_r -> time_mix_key and reshape if name.endswith('''.time_mix_r''' ): A : Union[str, Any] = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' ) if name != "head.weight": A : List[Any] = '''rwkv.''' + name A : Dict = weight return state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=False , snake_case__=None ): '''simple docstring''' if tokenizer_file is None: print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' ) A : int = 5_0277 A : Optional[int] = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' ) else: A : str = PreTrainedTokenizerFast(tokenizer_file=snake_case__ ) A : Any = len(snake_case__ ) tokenizer.save_pretrained(snake_case__ ) # 2. Build the config A : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: A : List[str] = candidate break if size is None: raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' ) if size not in possible_sizes: raise ValueError(F'`size` should be one of {possible_sizes}, got {size}.' ) A : Any = RwkvConfig( vocab_size=snake_case__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(snake_case__ ) # 3. Download model file then convert state_dict A : Union[str, Any] = hf_hub_download(snake_case__ , snake_case__ ) A : Tuple = torch.load(snake_case__ , map_location='''cpu''' ) A : List[Any] = convert_state_dict(snake_case__ ) # 4. Split in shards and save A, A : List[str] = shard_checkpoint(snake_case__ ) for shard_file, shard in shards.items(): torch.save(snake_case__ , os.path.join(snake_case__ , snake_case__ ) ) if index is not None: A : Dict = os.path.join(snake_case__ , snake_case__ ) # Save the index as well with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: A : List[Any] = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '''\n''' f.write(snake_case__ ) # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( '''Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.''' ) A : List[Any] = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: A : Union[str, Any] = torch.load(os.path.join(snake_case__ , snake_case__ ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(snake_case__ , snake_case__ ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' ) A : int = AutoModelForCausalLM.from_pretrained(snake_case__ ) model.push_to_hub(snake_case__ , max_shard_size='''2GB''' ) tokenizer.push_to_hub(snake_case__ ) if __name__ == "__main__": lowercase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.' ) parser.add_argument( '--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.' ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='Where to save the converted model.' ) parser.add_argument( '--tokenizer_file', default=None, type=str, help='Path to the tokenizer file to use (if not provided, only the model is converted).', ) parser.add_argument( '--size', default=None, type=str, help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Push to the Hub the converted model.', ) parser.add_argument( '--model_name', default=None, type=str, help='Name of the pushed model on the Hub, including the username / organization.', ) lowercase : Union[str, Any] = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
3
1
'''simple docstring''' from scipy.stats import pearsonr import datasets lowercase : Optional[int] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n' lowercase : Optional[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n' lowercase : str = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: """simple docstring""" if return_pvalue: A : Union[str, Any] = pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )}
3
'''simple docstring''' from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING lowercase : str = logging.get_logger(__name__) @add_end_docstrings(__snake_case ) class A ( __snake_case ): def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) self.check_model_type(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A, A : Dict = {}, {} if padding is not None: A : List[str] = padding if truncation is not None: A : Dict = truncation if top_k is not None: A : Optional[Any] = top_k return preprocess_params, {}, postprocess_params def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE , (Image.Image, str) ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : int = {'''image''': image, '''question''': question} else: A : Any = image A : Any = super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) return results def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" A : Union[str, Any] = load_image(inputs['''image'''] ) A : Optional[Any] = self.tokenizer( inputs['''question'''] , return_tensors=self.framework , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE ) A : Dict = self.image_processor(images=SCREAMING_SNAKE_CASE , return_tensors=self.framework ) model_inputs.update(SCREAMING_SNAKE_CASE ) return model_inputs def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : List[Any] = self.model(**SCREAMING_SNAKE_CASE ) return model_outputs def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=5 ) -> int: """simple docstring""" if top_k > self.model.config.num_labels: A : Dict = self.model.config.num_labels if self.framework == "pt": A : Optional[int] = model_outputs.logits.sigmoid()[0] A, A : int = probs.topk(SCREAMING_SNAKE_CASE ) else: raise ValueError(F'Unsupported framework: {self.framework}' ) A : int = scores.tolist() A : List[str] = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )]
3
1
'''simple docstring''' import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(SCREAMING_SNAKE_CASE ): A : int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[str] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> int: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: with self.subTest(SCREAMING_SNAKE_CASE ): A : Any = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Any = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: A : Optional[int] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = FlaxBertModel.from_pretrained(SCREAMING_SNAKE_CASE ) A : Optional[Any] = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() @slow def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: A : List[str] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE ) A : int = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''bert-base is not a local folder and is not a valid model identifier''' ): A : List[Any] = FlaxAutoModel.from_pretrained('''bert-base''' ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): A : Optional[int] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE , revision='''aaaaaa''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ): A : List[str] = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" with self.assertRaisesRegex(SCREAMING_SNAKE_CASE , '''Use `from_pt=True` to load this model''' ): A : Any = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
3
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase : Union[str, Any] = logging.get_logger(__name__) lowercase : str = { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json', 'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json', 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json', 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json', 'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json', 'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json', 'cl-tohoku/bert-base-japanese-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json' ), 'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json', # See all BERT models at https://huggingface.co/models?filter=bert } class A ( __snake_case ): __magic_name__ = '''bert''' def __init__( self , SCREAMING_SNAKE_CASE=30522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Optional[int]: """simple docstring""" super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = vocab_size A : Optional[Any] = hidden_size A : List[Any] = num_hidden_layers A : List[str] = num_attention_heads A : Dict = hidden_act A : Optional[Any] = intermediate_size A : List[Any] = hidden_dropout_prob A : List[Any] = attention_probs_dropout_prob A : Optional[Any] = max_position_embeddings A : List[str] = type_vocab_size A : Dict = initializer_range A : str = layer_norm_eps A : int = position_embedding_type A : Dict = use_cache A : str = classifier_dropout class A ( __snake_case ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": A : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: A : Optional[int] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
3
1
'''simple docstring''' import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed lowercase : Dict = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''): from run_translation import main # noqa set_seed(42) lowercase : Dict = 'sshleifer/student_marian_en_ro_6_1' lowercase : Optional[int] = 'sshleifer/tiny-mbart' @require_torch class A ( __snake_case ): def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , ) -> str: """simple docstring""" A : str = self.run_trainer( eval_steps=1 , max_len=12 , model_name=SCREAMING_SNAKE_CASE , num_train_epochs=1 , distributed=SCREAMING_SNAKE_CASE , extra_args_str=SCREAMING_SNAKE_CASE , predict_with_generate=SCREAMING_SNAKE_CASE , do_train=SCREAMING_SNAKE_CASE , do_eval=SCREAMING_SNAKE_CASE , do_predict=SCREAMING_SNAKE_CASE , ) A : List[Any] = TrainerState.load_from_json(os.path.join(SCREAMING_SNAKE_CASE , '''trainer_state.json''' ) ).log_history if not do_eval: return A : Dict = [log for log in logs if '''eval_loss''' in log.keys()] A : List[str] = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats A : Tuple = eval_metrics[-1] assert isinstance(last_step_stats['''eval_bleu'''] , SCREAMING_SNAKE_CASE ) assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" self.run_seqaseq_quick() @require_torch_multi_gpu def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE ) @require_torch_multi_gpu def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE , extra_args_str='''--sharded_ddp simple''' ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE , extra_args_str='''--sharded_ddp simple --fp16''' ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def __lowerCAmelCase ( self ) -> int: """simple docstring""" self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=SCREAMING_SNAKE_CASE ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" self.run_seqaseq_quick( distributed=SCREAMING_SNAKE_CASE , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=SCREAMING_SNAKE_CASE ) @require_apex @require_torch_gpu def __lowerCAmelCase ( self ) -> int: """simple docstring""" self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE , extra_args_str='''--fp16 --fp16_backend=apex''' ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE , extra_args_str='''--fp16 --fp16_backend=apex''' ) @parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] ) @require_torch_multi_gpu def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" A : Any = { # test with the default log_level - should be info and thus log info once '''base''': {'''extra_args_str''': '''''', '''n_matches''': 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes '''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica '''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1}, # test with high log_level and log_level_replica - should be quiet on all processes '''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0}, } A : Any = experiments[experiment_id] A : Any = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False} A : Union[str, Any] = '''Running training''' with CaptureStderr() as cl: self.run_seqaseq_quick(**SCREAMING_SNAKE_CASE , extra_args_str=data['''extra_args_str'''] ) A : int = len(re.findall(SCREAMING_SNAKE_CASE , cl.err ) ) self.assertEqual(SCREAMING_SNAKE_CASE , data['''n_matches'''] ) @slow def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Tuple = self.run_trainer( eval_steps=2 , max_len=128 , model_name=SCREAMING_SNAKE_CASE , learning_rate=3e-4 , num_train_epochs=10 , distributed=SCREAMING_SNAKE_CASE , ) # Check metrics A : Union[str, Any] = TrainerState.load_from_json(os.path.join(SCREAMING_SNAKE_CASE , '''trainer_state.json''' ) ).log_history A : Union[str, Any] = [log for log in logs if '''eval_loss''' in log.keys()] A : List[str] = eval_metrics[0] A : List[Any] = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats['''eval_bleu'''] , SCREAMING_SNAKE_CASE ) # test if do_predict saves generations and metrics A : int = os.listdir(SCREAMING_SNAKE_CASE ) A : Optional[int] = {os.path.basename(SCREAMING_SNAKE_CASE ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" from transformers.training_args import OptimizerNames def train_and_return_metrics(SCREAMING_SNAKE_CASE ) -> Tuple[int, float]: A : Optional[int] = '''--skip_memory_metrics 0''' A : List[Any] = self.run_trainer( max_len=128 , model_name=SCREAMING_SNAKE_CASE , learning_rate=3e-4 , num_train_epochs=1 , optim=SCREAMING_SNAKE_CASE , distributed=SCREAMING_SNAKE_CASE , extra_args_str=SCREAMING_SNAKE_CASE , do_eval=SCREAMING_SNAKE_CASE , do_predict=SCREAMING_SNAKE_CASE , n_gpus_to_use=1 , ) # Check metrics A : str = TrainerState.load_from_json(Path(SCREAMING_SNAKE_CASE , '''trainer_state.json''' ) ).log_history A : Union[str, Any] = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20 ) A : int = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20 ) A : List[Any] = logs[0]['''train_loss'''] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss A, A, A : List[Any] = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) A, A, A : Optional[Any] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) A : Dict = gpu_alloc_mem_orig - gpu_alloc_mem_bnb A : Union[str, Any] = gpu_peak_mem_orig + gpu_alloc_mem_orig A : Optional[Any] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb A : str = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings A : List[str] = 120 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got''' F' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and' F' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB' , ) self.assertGreater( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got''' F' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and' F' gpu_total_mem_bnb={gpu_total_mem_bnb}MB' , ) self.assertEqual( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , F'loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}' ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 3e-3 , SCREAMING_SNAKE_CASE = "adafactor" , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , ) -> Tuple: """simple docstring""" A : Tuple = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro''' A : Dict = self.get_auto_remove_tmp_dir() A : int = F'\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(SCREAMING_SNAKE_CASE )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(SCREAMING_SNAKE_CASE )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n '.split() A : Any = F'\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(SCREAMING_SNAKE_CASE )}\n '.split() A : Optional[Any] = ''' --do_predict '''.split() A : Union[str, Any] = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F'--optim {optim}'.split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: A : Dict = get_gpu_count() A : Tuple = get_torch_dist_unique_port() A : str = F'\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n '.split() A : str = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(SCREAMING_SNAKE_CASE , env=self.get_env() ) else: A : List[str] = ['''run_translation.py'''] + args with patch.object(SCREAMING_SNAKE_CASE , '''argv''' , SCREAMING_SNAKE_CASE ): main() return output_dir
3
'''simple docstring''' import requests from bsa import BeautifulSoup def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : str = BeautifulSoup(requests.get(snake_case__ , params=snake_case__ ).content , '''html.parser''' ) A : Dict = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} ) A : Optional[int] = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' ) return anchors[2].get_text() if __name__ == "__main__": lowercase : str = { 'title': ( 'Precisely geometry controlled microsupercapacitors for ultrahigh areal ' 'capacitance, volumetric capacitance, and energy density' ), 'journal': 'Chem. Mater.', 'volume': 30, 'pages': '3979-3990', 'year': 20_18, 'hl': 'en', } print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
3
1
'''simple docstring''' from collections.abc import Callable def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : float = a A : float = b if function(snake_case__ ) == 0: # one of the a or b is a root for the function return a elif function(snake_case__ ) == 0: return b elif ( function(snake_case__ ) * function(snake_case__ ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('''could not find root in given interval.''' ) else: A : float = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(snake_case__ ) == 0: return mid elif function(snake_case__ ) * function(snake_case__ ) < 0: A : Union[str, Any] = mid else: A : Optional[Any] = mid A : Optional[int] = start + (end - start) / 2.0 return mid def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 10_00)) import doctest doctest.testmod()
3
'''simple docstring''' class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Any = None A : Optional[Any] = None A : Tuple = graph self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Dict = len(SCREAMING_SNAKE_CASE ) A : Optional[Any] = None def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" if sources is int: A : Dict = [sources] if sinks is int: A : str = [sinks] if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0: return A : Optional[int] = sources[0] A : Union[str, Any] = sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1: A : Optional[int] = 0 for i in sources: max_input_flow += sum(self.graph[i] ) A : Dict = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: A : Dict = max_input_flow A : Tuple = 0 A : Tuple = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: A : Optional[Any] = max_input_flow A : Optional[Any] = size - 1 def __lowerCAmelCase ( self ) -> Any: """simple docstring""" if self.maximum_flow_algorithm is None: raise Exception('''You need to set maximum flow algorithm before.''' ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : List[Any] = algorithm(self ) class A : def __init__( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Union[str, Any] = flow_network A : Optional[Any] = flow_network.verticesCount A : Tuple = flow_network.sourceIndex A : Dict = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that A : str = flow_network.graph A : Optional[Any] = False def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" if not self.executed: self._algorithm() A : Optional[int] = True def __lowerCAmelCase ( self ) -> Any: """simple docstring""" pass class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE ) # use this to save your result A : List[str] = -1 def __lowerCAmelCase ( self ) -> str: """simple docstring""" if not self.executed: raise Exception('''You should execute algorithm before using its result!''' ) return self.maximum_flow class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE ) A : Optional[Any] = [[0] * self.verticies_count for i in range(self.verticies_count )] A : Union[str, Any] = [0] * self.verticies_count A : List[Any] = [0] * self.verticies_count def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Tuple = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule A : Optional[Any] = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list A : Union[str, Any] = 0 while i < len(SCREAMING_SNAKE_CASE ): A : str = vertices_list[i] A : List[str] = self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) ) A : int = 0 else: i += 1 A : Optional[Any] = sum(self.preflow[self.source_index] ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.relabel(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Dict = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : Dict = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): A : Dict = self.heights[to_index] if min_height is not None: A : Dict = min_height + 1 if __name__ == "__main__": lowercase : Optional[int] = [0] lowercase : List[Any] = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] lowercase : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network lowercase : List[str] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate lowercase : List[str] = flow_network.find_maximum_flow() print(f'''maximum flow is {maximum_flow}''')
3
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) lowercase : Dict = { 'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'], 'processing_speech_to_text': ['Speech2TextProcessor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[Any] = ['Speech2TextTokenizer'] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[str] = ['Speech2TextFeatureExtractor'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Dict = [ 'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFSpeech2TextForConditionalGeneration', 'TFSpeech2TextModel', 'TFSpeech2TextPreTrainedModel', ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Any = [ 'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Speech2TextForConditionalGeneration', 'Speech2TextModel', 'Speech2TextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys lowercase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
3
'''simple docstring''' def lowerCAmelCase_ ( snake_case__ = 10 ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ) or n < 0: raise ValueError('''Invalid input''' ) A : List[str] = 10**n A : Tuple = 2_8433 * (pow(2 , 783_0457 , snake_case__ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(f'''{solution(10) = }''')
3
1
'''simple docstring''' import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) lowercase : int = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='relu') ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation='relu')) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=1_28, activation='relu')) classifier.add(layers.Dense(units=1, activation='sigmoid')) # Compiling the CNN classifier.compile( optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') lowercase : Optional[Any] = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 2_55, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) lowercase : str = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_55) lowercase : Dict = train_datagen.flow_from_directory( 'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) lowercase : List[str] = test_datagen.flow_from_directory( 'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save('cnn.h5') # Part 3 - Making new predictions lowercase : Optional[int] = tf.keras.preprocessing.image.load_img( 'dataset/single_prediction/image.png', target_size=(64, 64) ) lowercase : Dict = tf.keras.preprocessing.image.img_to_array(test_image) lowercase : Dict = np.expand_dims(test_image, axis=0) lowercase : Union[str, Any] = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: lowercase : Tuple = 'Normal' if result[0][0] == 1: lowercase : int = 'Abnormality detected'
3
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging lowercase : List[str] = logging.get_logger(__name__) lowercase : str = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class A ( __snake_case ): __magic_name__ = '''gpt_neo''' __magic_name__ = ['''past_key_values'''] __magic_name__ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , SCREAMING_SNAKE_CASE=50257 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=24 , SCREAMING_SNAKE_CASE=[[["global", "local"], 12]] , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=50256 , SCREAMING_SNAKE_CASE=50256 , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" A : Union[str, Any] = vocab_size A : Optional[Any] = max_position_embeddings A : Dict = hidden_size A : Optional[Any] = num_layers A : Tuple = num_heads A : int = intermediate_size A : Optional[Any] = window_size A : List[Any] = activation_function A : Union[str, Any] = resid_dropout A : Any = embed_dropout A : List[Any] = attention_dropout A : str = classifier_dropout A : List[Any] = layer_norm_epsilon A : str = initializer_range A : List[str] = use_cache A : Optional[int] = bos_token_id A : List[Any] = eos_token_id A : int = attention_types A : int = self.expand_attention_types_params(SCREAMING_SNAKE_CASE ) if len(self.attention_layers ) != self.num_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.attention_layers)` == `config.num_layers` ''' F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, ' F'`config.num_layers = {self.num_layers}`. ' '''`config.attention_layers` is prepared using `config.attention_types`. ''' '''Please verify the value of `config.attention_types` argument.''' ) super().__init__(bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : List[str] = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' import torch A : Tuple = input.size() A : Union[str, Any] = len(snake_case__ ) A : List[str] = shape[dimension] A : Union[str, Any] = torch.arange(0 , snake_case__ , snake_case__ ) A : List[str] = torch.div(sizedim - size , snake_case__ , rounding_mode='''floor''' ) + 1 A : Optional[int] = torch.arange(snake_case__ ) + low_indices[:min_length][:, None] A : str = [slice(snake_case__ )] * rank A : List[Any] = indices A : Union[str, Any] = input[s] A : List[str] = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch A : List[str] = torch.arange(1 , snake_case__ ) A : Optional[int] = torch.remainder(snake_case__ , snake_case__ ) A : Optional[int] = remainders == 0 A : Optional[Any] = candidates[divisor_indices] A : Optional[int] = torch.max(snake_case__ ) return largest_divisor, torch.div(snake_case__ , snake_case__ , rounding_mode='''floor''' ) class A ( __snake_case ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" A : Tuple = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE , direction='''inputs''' ) A : Optional[Any] = {0: '''batch''', 1: '''past_sequence + sequence'''} else: A : Dict = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return self._config.num_heads def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]: """simple docstring""" A : List[str] = super(SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs( SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE ) # We need to order the input in the way they appears in the forward() A : Any = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch A, A : Dict = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values A : str = seqlen + 2 A : List[Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) A : Any = [ (torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers ) ] A : str = common_inputs['''attention_mask'''] if self.use_past: A : Optional[int] = ordered_inputs['''attention_mask'''].dtype A : List[str] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )] , dim=1 ) return ordered_inputs @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return 13
3
1
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowercase : int = logging.get_logger(__name__) lowercase : Any = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} lowercase : str = { 'tokenizer_file': { 'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json', }, } lowercase : Optional[Any] = { 'gpt-neox-20b': 20_48, } class A ( __snake_case ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ['''input_ids''', '''attention_mask'''] def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="<|endoftext|>" , SCREAMING_SNAKE_CASE="<|endoftext|>" , SCREAMING_SNAKE_CASE="<|endoftext|>" , SCREAMING_SNAKE_CASE=False , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: """simple docstring""" super().__init__( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) A : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE ) != add_prefix_space: A : Optional[int] = getattr(SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type''' ) ) A : Dict = add_prefix_space A : Dict = pre_tok_class(**SCREAMING_SNAKE_CASE ) A : List[str] = add_prefix_space def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" A : Tuple = self._tokenizer.model.save(SCREAMING_SNAKE_CASE , name=SCREAMING_SNAKE_CASE ) return tuple(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[int]: """simple docstring""" A : Any = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) + [self.eos_token_id] ) if len(SCREAMING_SNAKE_CASE ) > self.model_max_length: A : List[str] = input_ids[-self.model_max_length :] return input_ids
3
'''simple docstring''' import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Union[str, Any] = [] A : Union[str, Any] = [] for i in range(self.num_layers ): A : Any = self.in_channels if i == 0 else self.out_channels A : Optional[Any] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Optional[int] = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnets A : Union[str, Any] = attentions if self.add_downsample: A : int = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Union[str, Any]: """simple docstring""" A : Optional[Any] = () for resnet, attn in zip(self.resnets , self.attentions ): A : int = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Dict = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[Any] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Optional[Any] = [] for i in range(self.num_layers ): A : Optional[Any] = self.in_channels if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets if self.add_downsample: A : Dict = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]: """simple docstring""" A : str = () for resnet in self.resnets: A : Optional[int] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[int] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[Any] = [] A : Optional[int] = [] for i in range(self.num_layers ): A : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : Dict = self.prev_output_channel if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : int = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets A : Optional[Any] = attentions if self.add_upsample: A : Optional[int] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[int]: """simple docstring""" for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states A : List[str] = res_hidden_states_tuple[-1] A : int = res_hidden_states_tuple[:-1] A : List[str] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Tuple = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : Dict = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : int = [] for i in range(self.num_layers ): A : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : List[str] = self.prev_output_channel if i == 0 else self.out_channels A : str = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[Any] = resnets if self.add_upsample: A : Optional[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Tuple: """simple docstring""" for resnet in self.resnets: # pop res hidden states A : Optional[int] = res_hidden_states_tuple[-1] A : Optional[Any] = res_hidden_states_tuple[:-1] A : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Optional[Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : List[str] = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : str = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] A : List[Any] = [] for _ in range(self.num_layers ): A : int = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[str] = resnets A : List[str] = attentions def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Dict: """simple docstring""" A : Optional[Any] = self.resnets[0](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): A : Optional[int] = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) return hidden_states
3
1
'''simple docstring''' def lowerCAmelCase_ ( snake_case__ = 10 ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ) or n < 0: raise ValueError('''Invalid input''' ) A : List[str] = 10**n A : Tuple = 2_8433 * (pow(2 , 783_0457 , snake_case__ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(f'''{solution(10) = }''')
3
'''simple docstring''' import os def lowerCAmelCase_ ( ): '''simple docstring''' A : List[Any] = os.path.join(os.path.dirname(snake_case__ ) , '''num.txt''' ) with open(snake_case__ ) as file_hand: return str(sum(int(snake_case__ ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
3
1
'''simple docstring''' import sys lowercase : Tuple = ( '73167176531330624919225119674426574742355349194934' '96983520312774506326239578318016984801869478851843' '85861560789112949495459501737958331952853208805511' '12540698747158523863050715693290963295227443043557' '66896648950445244523161731856403098711121722383113' '62229893423380308135336276614282806444486645238749' '30358907296290491560440772390713810515859307960866' '70172427121883998797908792274921901699720888093776' '65727333001053367881220235421809751254540594752243' '52584907711670556013604839586446706324415722155397' '53697817977846174064955149290862569321978468622482' '83972241375657056057490261407972968652414535100474' '82166370484403199890008895243450658541227588666881' '16427171479924442928230863465674813919123162824586' '17866458359124566529476545682848912883142607690042' '24219022671055626321111109370544217506941658960408' '07198403850962455444362981230987879927244284909188' '84580156166097919133875499200524063689912560717606' '05886116467109405077541002256983155200055935729725' '71636269561882670428252483600823257530420752963450' ) def lowerCAmelCase_ ( snake_case__ = N ): '''simple docstring''' A : List[Any] = -sys.maxsize - 1 for i in range(len(snake_case__ ) - 12 ): A : Union[str, Any] = 1 for j in range(13 ): product *= int(n[i + j] ) if product > largest_product: A : Optional[int] = product return largest_product if __name__ == "__main__": print(f'''{solution() = }''')
3
'''simple docstring''' import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] ) @pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , '''IN_MEMORY_MAX_SIZE''' , snake_case__ ) A : Dict = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: A : Dict = dataset_size < in_memory_max_size else: A : Tuple = False A : int = is_small_dataset(snake_case__ ) assert result == expected
3
1
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class A ( __snake_case ): __magic_name__ = ['''image_processor''', '''tokenizer'''] __magic_name__ = '''ViTImageProcessor''' __magic_name__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Optional[Any] = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , SCREAMING_SNAKE_CASE , ) A : int = kwargs.pop('''feature_extractor''' ) A : Dict = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __call__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" if text is None and visual_prompt is None and images is None: raise ValueError('''You have to specify either text, visual prompt or images.''' ) if text is not None and visual_prompt is not None: raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' ) if text is not None: A : str = self.tokenizer(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) if visual_prompt is not None: A : str = self.image_processor(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) if images is not None: A : Any = self.image_processor(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) if visual_prompt is not None and images is not None: A : Optional[int] = { '''pixel_values''': image_features.pixel_values, '''conditional_pixel_values''': prompt_features.pixel_values, } return encoding elif text is not None and images is not None: A : List[Any] = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: A : int = { '''conditional_pixel_values''': prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE ) , tensor_type=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" return self.tokenizer.decode(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @property def __lowerCAmelCase ( self ) -> str: """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , SCREAMING_SNAKE_CASE , ) return self.image_processor_class @property def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , SCREAMING_SNAKE_CASE , ) return self.image_processor
3
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class A ( __snake_case ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
3
1
'''simple docstring''' # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib lowercase : List[str] = get_logger() lowercase : Optional[dict] = None class A ( TensorFormatter[Mapping, '''jax.Array''', Mapping] ): def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" super().__init__(features=SCREAMING_SNAKE_CASE ) import jax from jaxlib.xla_client import Device if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): raise ValueError( F'Expected {device} to be a `str` not {type(SCREAMING_SNAKE_CASE )}, as `jaxlib.xla_extension.Device` ' '''is not serializable neither with `pickle` nor with `dill`. Instead you can surround ''' '''the device with `str()` to get its string identifier that will be internally mapped ''' '''to the actual `jaxlib.xla_extension.Device`.''' ) A : int = device if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else str(jax.devices()[0] ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: A : Tuple = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys() ): logger.warning( F'Device with string identifier {self.device} not listed among the available ' F'devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default ' F'device: {str(jax.devices()[0] )}.' ) A : Optional[Any] = str(jax.devices()[0] ) A : Dict = jnp_array_kwargs @staticmethod def __lowerCAmelCase ( ) -> Dict[str, "jaxlib.xla_extension.Device"]: """simple docstring""" import jax return {str(SCREAMING_SNAKE_CASE ): device for device in jax.devices()} def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" import jax import jax.numpy as jnp if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and column: if all( isinstance(SCREAMING_SNAKE_CASE , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return jnp.stack(SCREAMING_SNAKE_CASE , axis=0 ) return column def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" import jax import jax.numpy as jnp if isinstance(SCREAMING_SNAKE_CASE , (str, bytes, type(SCREAMING_SNAKE_CASE )) ): return value elif isinstance(SCREAMING_SNAKE_CASE , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() A : int = {} if isinstance(SCREAMING_SNAKE_CASE , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: A : List[Any] = {'''dtype''': jnp.intaa} else: A : Tuple = {'''dtype''': jnp.intaa} elif isinstance(SCREAMING_SNAKE_CASE , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): A : Union[str, Any] = {'''dtype''': jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(SCREAMING_SNAKE_CASE , PIL.Image.Image ): A : List[str] = np.asarray(SCREAMING_SNAKE_CASE ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: A : int = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device] ): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return jnp.array(SCREAMING_SNAKE_CASE , **{**default_dtype, **self.jnp_array_kwargs} ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" import jax # support for torch, tf, jax etc. if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(SCREAMING_SNAKE_CASE , torch.Tensor ): return self._tensorize(data_struct.detach().cpu().numpy()[()] ) if hasattr(SCREAMING_SNAKE_CASE , '''__array__''' ) and not isinstance(SCREAMING_SNAKE_CASE , jax.Array ): A : Any = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(SCREAMING_SNAKE_CASE , np.ndarray ): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE ) for substruct in data_struct] ) elif isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ): return self._consolidate([self.recursive_tensorize(SCREAMING_SNAKE_CASE ) for substruct in data_struct] ) return self._tensorize(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" return map_nested(self._recursive_tensorize , SCREAMING_SNAKE_CASE , map_list=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Mapping: """simple docstring""" A : List[str] = self.numpy_arrow_extractor().extract_row(SCREAMING_SNAKE_CASE ) A : Any = self.python_features_decoder.decode_row(SCREAMING_SNAKE_CASE ) return self.recursive_tensorize(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> "jax.Array": """simple docstring""" A : Union[str, Any] = self.numpy_arrow_extractor().extract_column(SCREAMING_SNAKE_CASE ) A : Tuple = self.python_features_decoder.decode_column(SCREAMING_SNAKE_CASE , pa_table.column_names[0] ) A : Optional[int] = self.recursive_tensorize(SCREAMING_SNAKE_CASE ) A : List[Any] = self._consolidate(SCREAMING_SNAKE_CASE ) return column def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Mapping: """simple docstring""" A : Optional[Any] = self.numpy_arrow_extractor().extract_batch(SCREAMING_SNAKE_CASE ) A : Any = self.python_features_decoder.decode_batch(SCREAMING_SNAKE_CASE ) A : Any = self.recursive_tensorize(SCREAMING_SNAKE_CASE ) for column_name in batch: A : Optional[Any] = self._consolidate(batch[column_name] ) return batch
3
'''simple docstring''' from scipy.stats import pearsonr import datasets lowercase : Optional[int] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n' lowercase : Optional[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n' lowercase : str = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: """simple docstring""" if return_pvalue: A : Union[str, Any] = pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )}
3
1
'''simple docstring''' def lowerCAmelCase_ ( snake_case__ = 100_0000 ): '''simple docstring''' A : str = set(range(3 , snake_case__ , 2 ) ) primes.add(2 ) for p in range(3 , snake_case__ , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , snake_case__ , snake_case__ ) ) ) A : Dict = [float(snake_case__ ) for n in range(limit + 1 )] for p in primes: for n in range(snake_case__ , limit + 1 , snake_case__ ): phi[n] *= 1 - 1 / p return int(sum(phi[2:] ) ) if __name__ == "__main__": print(f'''{solution() = }''')
3
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) lowercase : Dict = { 'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'], 'processing_speech_to_text': ['Speech2TextProcessor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[Any] = ['Speech2TextTokenizer'] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[str] = ['Speech2TextFeatureExtractor'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Dict = [ 'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFSpeech2TextForConditionalGeneration', 'TFSpeech2TextModel', 'TFSpeech2TextPreTrainedModel', ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Any = [ 'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Speech2TextForConditionalGeneration', 'Speech2TextModel', 'Speech2TextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys lowercase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
3
1
'''simple docstring''' def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): A : Union[str, Any] = F'Input value of [number={number}] must be an integer' raise TypeError(snake_case__ ) if number < 1: A : List[str] = F'Input value of [number={number}] must be > 0' raise ValueError(snake_case__ ) A : Optional[Any] = 1 for i in range(1 , snake_case__ ): current_number *= 4 * i - 2 current_number //= i + 1 return current_number if __name__ == "__main__": import doctest doctest.testmod()
3
'''simple docstring''' import os import sys import unittest lowercase : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) lowercase : Any = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py') lowercase : Optional[int] = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py') class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Any = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : List[Any] = {'''BertModelTest''': '''BertModelTester'''} A : int = { '''BlipModelTest''': '''BlipModelTester''', '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''', '''BlipTextModelTest''': '''BlipTextModelTester''', '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''', '''BlipVQAModelTest''': '''BlipVQAModelTester''', '''BlipVisionModelTest''': '''BlipVisionModelTester''', } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : List[str] = { '''BertForMaskedLM''': ['''BertModelTest'''], '''BertForMultipleChoice''': ['''BertModelTest'''], '''BertForNextSentencePrediction''': ['''BertModelTest'''], '''BertForPreTraining''': ['''BertModelTest'''], '''BertForQuestionAnswering''': ['''BertModelTest'''], '''BertForSequenceClassification''': ['''BertModelTest'''], '''BertForTokenClassification''': ['''BertModelTest'''], '''BertLMHeadModel''': ['''BertModelTest'''], '''BertModel''': ['''BertModelTest'''], } A : Union[str, Any] = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''], '''BlipModel''': ['''BlipModelTest'''], '''BlipTextModel''': ['''BlipTextModelTest'''], '''BlipVisionModel''': ['''BlipVisionModelTest'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : int = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Dict = { '''BertForMaskedLM''': ['''BertModelTester'''], '''BertForMultipleChoice''': ['''BertModelTester'''], '''BertForNextSentencePrediction''': ['''BertModelTester'''], '''BertForPreTraining''': ['''BertModelTester'''], '''BertForQuestionAnswering''': ['''BertModelTester'''], '''BertForSequenceClassification''': ['''BertModelTester'''], '''BertForTokenClassification''': ['''BertModelTester'''], '''BertLMHeadModel''': ['''BertModelTester'''], '''BertModel''': ['''BertModelTester'''], } A : str = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''], '''BlipModel''': ['''BlipModelTester'''], '''BlipTextModel''': ['''BlipTextModelTester'''], '''BlipVisionModel''': ['''BlipVisionModelTester'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
3
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowercase : Any = { 'configuration_squeezebert': [ 'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SqueezeBertConfig', 'SqueezeBertOnnxConfig', ], 'tokenization_squeezebert': ['SqueezeBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Any = ['SqueezeBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Optional[Any] = [ 'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'SqueezeBertForMaskedLM', 'SqueezeBertForMultipleChoice', 'SqueezeBertForQuestionAnswering', 'SqueezeBertForSequenceClassification', 'SqueezeBertForTokenClassification', 'SqueezeBertModel', 'SqueezeBertModule', 'SqueezeBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys lowercase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
3
'''simple docstring''' from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class A ( __snake_case ): __magic_name__ = DistilBertTokenizer __magic_name__ = DistilBertTokenizerFast __magic_name__ = True @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : List[Any] = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' ) A : Dict = tokenizer.encode('''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE ) A : List[str] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE ) A : Optional[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE ) A : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
3
1
'''simple docstring''' import functools from typing import Any def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ) or len(snake_case__ ) == 0: raise ValueError('''the string should be not empty string''' ) if not isinstance(snake_case__ , snake_case__ ) or not all( isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) > 0 for item in words ): raise ValueError('''the words should be a list of non-empty strings''' ) # Build trie A : dict[str, Any] = {} A : Union[str, Any] = '''WORD_KEEPER''' for word in words: A : Union[str, Any] = trie for c in word: if c not in trie_node: A : List[Any] = {} A : Tuple = trie_node[c] A : int = True A : str = len(snake_case__ ) # Dynamic programming method @functools.cache def is_breakable(snake_case__ ) -> bool: if index == len_string: return True A : Optional[int] = trie for i in range(snake_case__ , snake_case__ ): A : Dict = trie_node.get(string[i] , snake_case__ ) if trie_node is None: return False if trie_node.get(snake_case__ , snake_case__ ) and is_breakable(i + 1 ): return True return False return is_breakable(0 ) if __name__ == "__main__": import doctest doctest.testmod()
3
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase : Optional[int] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''input_features''', '''attention_mask'''] def __init__( self , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=16000 , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" super().__init__(feature_size=SCREAMING_SNAKE_CASE , sampling_rate=SCREAMING_SNAKE_CASE , padding_value=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = num_mel_bins A : Tuple = do_ceptral_normalize A : Dict = normalize_means A : List[Any] = normalize_vars A : List[str] = True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : List[Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers A : Any = torch.from_numpy(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) A : Any = ta_kaldi.fbank(SCREAMING_SNAKE_CASE , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 0.0 , ) -> np.ndarray: """simple docstring""" if normalize_means: A : Dict = x[:input_length].mean(axis=0 ) A : Optional[Any] = np.subtract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if normalize_vars: A : str = x[:input_length].std(axis=0 ) A : int = np.divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if input_length < x.shape[0]: A : List[str] = padding_value # make sure array is in float32 A : Tuple = x.astype(np.floataa ) return x def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[np.ndarray]: """simple docstring""" A : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' F' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) A : List[Any] = isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}' ) A : Tuple = is_batched_numpy or ( isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ): A : Union[str, Any] = np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A : Any = [raw_speech] # extract fbank features A : List[str] = [self._extract_fbank_features(SCREAMING_SNAKE_CASE ) for waveform in raw_speech] # convert into correct format for padding A : str = BatchFeature({'''input_features''': features} ) A : Union[str, Any] = self.pad( SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) # make sure list is in array format A : List[str] = padded_inputs.get('''input_features''' ) if isinstance(input_features[0] , SCREAMING_SNAKE_CASE ): A : str = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features] A : Union[str, Any] = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: A : Dict = ( np.array(SCREAMING_SNAKE_CASE , dtype=np.intaa ) if self._get_padding_strategies(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE ) is not PaddingStrategy.DO_NOT_PAD else None ) A : List[Any] = self.normalize( padded_inputs['''input_features'''] , attention_mask=SCREAMING_SNAKE_CASE ) if return_tensors is not None: A : int = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE ) return padded_inputs
3
1
'''simple docstring''' from decimal import Decimal, getcontext from math import ceil, factorial def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): raise TypeError('''Undefined for non-integers''' ) elif precision < 1: raise ValueError('''Undefined for non-natural numbers''' ) A : Optional[int] = precision A : str = ceil(precision / 14 ) A : List[str] = 42_6880 * Decimal(1_0005 ).sqrt() A : List[str] = 1 A : Tuple = 1359_1409 A : List[Any] = Decimal(snake_case__ ) for k in range(1 , snake_case__ ): A : List[Any] = factorial(6 * k ) // (factorial(3 * k ) * factorial(snake_case__ ) ** 3) linear_term += 5_4514_0134 exponential_term *= -26_2537_4126_4076_8000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": lowercase : int = 50 print(f'''The first {n} digits of pi is: {pi(n)}''')
3
'''simple docstring''' import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 lowercase : str = get_tests_dir('fixtures/dummy_feature_extractor_config.json') lowercase : str = get_tests_dir('fixtures/vocab.json') lowercase : int = get_tests_dir('fixtures') class A ( unittest.TestCase ): __magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = 0 def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : List[Any] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : Union[str, Any] = WavaVecaConfig() A : List[str] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) # save in new folder model_config.save_pretrained(SCREAMING_SNAKE_CASE ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) ) A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : Dict = WavaVecaFeatureExtractor() A : List[str] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(SCREAMING_SNAKE_CASE ) # drop `processor_class` in tokenizer with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f: A : Dict = json.load(SCREAMING_SNAKE_CASE ) config_dict.pop('''processor_class''' ) with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE ) ) A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : List[Any] = WavaVecaFeatureExtractor() A : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(SCREAMING_SNAKE_CASE ) # drop `processor_class` in feature extractor with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f: A : str = json.load(SCREAMING_SNAKE_CASE ) config_dict.pop('''processor_class''' ) with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE ) ) A : str = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : str = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' ) model_config.save_pretrained(SCREAMING_SNAKE_CASE ) # copy relevant files copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) ) # create emtpy sample processor with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write('''{}''' ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" with self.assertRaises(SCREAMING_SNAKE_CASE ): A : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(SCREAMING_SNAKE_CASE ): A : Union[str, Any] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) A : List[str] = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) A : Tuple = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version A : List[str] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE ) A : int = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(SCREAMING_SNAKE_CASE ): AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Now that the config is registered, it can be used as any other config with the auto-API A : List[Any] = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: A : Tuple = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : Optional[int] = CustomTokenizer(SCREAMING_SNAKE_CASE ) A : Any = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" class A ( __snake_case ): __magic_name__ = False class A ( __snake_case ): __magic_name__ = False class A ( __snake_case ): __magic_name__ = '''AutoFeatureExtractor''' __magic_name__ = '''AutoTokenizer''' __magic_name__ = False try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # If remote code is not set, the default is to use local classes. A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. A : Optional[int] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. A : Tuple = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : int = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Optional[int] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' ) self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' ) @is_staging_test class A ( unittest.TestCase ): __magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def __lowerCAmelCase ( cls ) -> Dict: """simple docstring""" A : Optional[int] = TOKEN HfFolder.save_token(SCREAMING_SNAKE_CASE ) @classmethod def __lowerCAmelCase ( cls ) -> Any: """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' ) except HTTPError: pass def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Union[str, Any] = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(SCREAMING_SNAKE_CASE , '''test-processor''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token ) A : int = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Tuple = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(SCREAMING_SNAKE_CASE , '''test-processor-org''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token , organization='''valid_org''' , ) A : int = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() A : Any = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: A : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : str = CustomTokenizer(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(F'{USER}/test-dynamic-processor' , token=self._token ) A : List[str] = Repository(SCREAMING_SNAKE_CASE , clone_from=F'{USER}/test-dynamic-processor' , token=self._token ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { '''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''', '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) ) as f: A : Dict = json.load(SCREAMING_SNAKE_CASE ) self.assertDictEqual( tokenizer_config['''auto_map'''] , { '''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None], '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_feature_extraction.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_tokenization.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_processing.py''' ) ) ) repo.push_to_hub() A : Optional[int] = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' , trust_remote_code=SCREAMING_SNAKE_CASE ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
3
1
'''simple docstring''' lowercase : Tuple = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []} lowercase : Optional[Any] = ['a', 'b', 'c', 'd', 'e'] def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : Optional[Any] = start # add current to visited visited.append(snake_case__ ) A : Optional[Any] = edges[current] for neighbor in neighbors: # if neighbor not in visited, visit if neighbor not in visited: A : str = topological_sort(snake_case__ , snake_case__ , snake_case__ ) # if all neighbors visited add current to sort sort.append(snake_case__ ) # if all vertices haven't been visited select a new one to visit if len(snake_case__ ) != len(snake_case__ ): for vertice in vertices: if vertice not in visited: A : int = topological_sort(snake_case__ , snake_case__ , snake_case__ ) # return sort return sort if __name__ == "__main__": lowercase : Optional[Any] = topological_sort('a', [], []) print(sort)
3
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: lowercase : Optional[Any] = None lowercase : Tuple = logging.get_logger(__name__) lowercase : Dict = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'} lowercase : Tuple = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, 'tokenizer_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json', }, } lowercase : List[str] = { 'google/rembert': 2_56, } lowercase : Dict = '▁' class A ( __snake_case ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = RemBertTokenizer def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[MASK]" , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" A : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token super().__init__( SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) A : List[Any] = do_lower_case A : str = remove_space A : int = keep_accents A : Union[str, Any] = vocab_file A : List[Any] = False if not self.vocab_file else True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : List[Any] = [self.sep_token_id] A : Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : Tuple = [self.sep_token_id] A : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error('''Vocabulary path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE ) ) return A : Any = os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
3
1
'''simple docstring''' from __future__ import annotations from random import choice def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' return choice(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Tuple = random_pivot(snake_case__ ) # partition based on pivot # linear time A : List[Any] = [e for e in lst if e < pivot] A : List[str] = [e for e in lst if e > pivot] # if we get lucky, pivot might be the element we want. # we can easily see this: # small (elements smaller than k) # + pivot (kth element) # + big (elements larger than k) if len(snake_case__ ) == k - 1: return pivot # pivot is in elements bigger than k elif len(snake_case__ ) < k - 1: return kth_number(snake_case__ , k - len(snake_case__ ) - 1 ) # pivot is in elements smaller than k else: return kth_number(snake_case__ , snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod()
3
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowercase : Optional[Any] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''pixel_values'''] def __init__( self , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 1 / 255 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) A : str = size if size is not None else {'''shortest_edge''': 384} A : Tuple = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : str = do_resize A : List[Any] = size # Default value set here for backwards compatibility where the value in config is None A : List[Any] = crop_pct if crop_pct is not None else 224 / 256 A : Optional[int] = resample A : Union[str, Any] = do_rescale A : List[str] = rescale_factor A : Union[str, Any] = do_normalize A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : str = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) if "shortest_edge" not in size: raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' ) A : Any = size['''shortest_edge'''] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct A : Dict = int(shortest_edge / crop_pct ) A : str = get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : int = resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) else: # warping (no cropping) when evaluated at 384 or larger return resize( SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE , ) -> PIL.Image.Image: """simple docstring""" A : int = do_resize if do_resize is not None else self.do_resize A : Tuple = crop_pct if crop_pct is not None else self.crop_pct A : Optional[Any] = resample if resample is not None else self.resample A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale A : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor A : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean A : List[str] = image_std if image_std is not None else self.image_std A : Union[str, Any] = size if size is not None else self.size A : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : Any = make_list_of_images(SCREAMING_SNAKE_CASE ) if not valid_images(SCREAMING_SNAKE_CASE ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError('''crop_pct must be specified if size < 384.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. A : Optional[int] = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images] if do_resize: A : Any = [self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , crop_pct=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: A : str = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images] if do_normalize: A : Dict = [self.normalize(image=SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE ) for image in images] A : Any = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images] A : Optional[int] = {'''pixel_values''': images} return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
3
1
'''simple docstring''' def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' return not any( neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(snake_case__ ) ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if index == len(snake_case__ ): return True # Recursive Step for i in range(snake_case__ ): if valid_coloring(graph[index] , snake_case__ , snake_case__ ): # Color current vertex A : Any = i # Validate coloring if util_color(snake_case__ , snake_case__ , snake_case__ , index + 1 ): return True # Backtrack A : List[str] = -1 return False def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : int = [-1] * len(snake_case__ ) if util_color(snake_case__ , snake_case__ , snake_case__ , 0 ): return colored_vertices return []
3
'''simple docstring''' import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(SCREAMING_SNAKE_CASE ): A : int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[str] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> int: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: with self.subTest(SCREAMING_SNAKE_CASE ): A : Any = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Any = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: A : Optional[int] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = FlaxBertModel.from_pretrained(SCREAMING_SNAKE_CASE ) A : Optional[Any] = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() @slow def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: A : List[str] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE ) A : int = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''bert-base is not a local folder and is not a valid model identifier''' ): A : List[Any] = FlaxAutoModel.from_pretrained('''bert-base''' ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): A : Optional[int] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE , revision='''aaaaaa''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ): A : List[str] = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" with self.assertRaisesRegex(SCREAMING_SNAKE_CASE , '''Use `from_pt=True` to load this model''' ): A : Any = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
3
1
'''simple docstring''' import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters lowercase : str = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None ): '''simple docstring''' if "." in tensor_name: A : int = tensor_name.split('''.''' ) for split in splits[:-1]: A : Optional[Any] = getattr(snake_case__ , snake_case__ ) if new_module is None: raise ValueError(F'{module} has no attribute {split}.' ) A : int = new_module A : Dict = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(F'{module} does not have a parameter or a buffer named {tensor_name}.' ) A : List[str] = tensor_name in module._buffers A : Tuple = getattr(snake_case__ , snake_case__ ) if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None: raise ValueError(F'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' ) A : Optional[int] = False A : Tuple = False if is_buffer or not is_bitsandbytes_available(): A : List[Any] = False A : Union[str, Any] = False else: A : Dict = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) A : Dict = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: A : Tuple = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: A : Union[str, Any] = old_value.to(snake_case__ ) elif isinstance(snake_case__ , torch.Tensor ): A : Dict = value.to('''cpu''' ) if value.dtype == torch.inta: A : Optional[int] = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse( '''0.37.2''' ) if not is_abit_serializable: raise ValueError( '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ''' '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' ) else: A : Union[str, Any] = torch.tensor(snake_case__ , device='''cpu''' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. if issubclass(module.source_cls , snake_case__ ) and fpaa_statistics is None: A : Optional[Any] = new_value.T A : Any = old_value.__dict__ if is_abit: A : List[Any] = bnb.nn.IntaParams(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ ) elif is_abit: A : Optional[int] = bnb.nn.Paramsabit(snake_case__ , requires_grad=snake_case__ , **snake_case__ ).to(snake_case__ ) A : List[Any] = new_value if fpaa_statistics is not None: setattr(module.weight , '''SCB''' , fpaa_statistics.to(snake_case__ ) ) else: if value is None: A : Any = old_value.to(snake_case__ ) elif isinstance(snake_case__ , torch.Tensor ): A : List[Any] = value.to(snake_case__ ) else: A : List[Any] = torch.tensor(snake_case__ , device=snake_case__ ) if is_buffer: A : Union[str, Any] = new_value else: A : List[str] = nn.Parameter(snake_case__ , requires_grad=old_value.requires_grad ) A : str = new_value def lowerCAmelCase_ ( snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False ): '''simple docstring''' for name, module in model.named_children(): if current_key_name is None: A : Any = [] current_key_name.append(snake_case__ ) if (isinstance(snake_case__ , nn.Linear ) or isinstance(snake_case__ , snake_case__ )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '''.'''.join(snake_case__ ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(snake_case__ , snake_case__ ): A, A : Tuple = module.weight.shape else: A : Optional[Any] = module.in_features A : Any = module.out_features if quantization_config.quantization_method() == "llm_int8": A : Dict = bnb.nn.LinearabitLt( snake_case__ , snake_case__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) A : int = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: A : List[Any] = bnb.nn.Linearabit( snake_case__ , snake_case__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) A : int = True # Store the module class in case we need to transpose the weight later A : int = type(snake_case__ ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(snake_case__ ) if len(list(module.children() ) ) > 0: A, A : int = _replace_with_bnb_linear( snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_been_replaced=snake_case__ , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def lowerCAmelCase_ ( snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None ): '''simple docstring''' A : Union[str, Any] = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert A, A : Optional[Any] = _replace_with_bnb_linear( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def lowerCAmelCase_ ( *snake_case__ , **snake_case__ ): '''simple docstring''' warnings.warn( '''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , snake_case__ , ) return replace_with_bnb_linear(*snake_case__ , **snake_case__ ) def lowerCAmelCase_ ( *snake_case__ , **snake_case__ ): '''simple docstring''' warnings.warn( '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , snake_case__ , ) return set_module_quantized_tensor_to_device(*snake_case__ , **snake_case__ ) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : str = deepcopy(snake_case__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() A : List[str] = find_tied_parameters(snake_case__ ) # For compatibility with Accelerate < 0.18 if isinstance(snake_case__ , snake_case__ ): A : List[str] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: A : Tuple = sum(snake_case__ , [] ) A : Optional[int] = len(snake_case__ ) > 0 # Check if it is a base model A : Dict = not hasattr(snake_case__ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head A : List[str] = list(model.named_children() ) A : List[Any] = [list_modules[-1][0]] # add last module together with tied weights A : Union[str, Any] = set(snake_case__ ) - set(snake_case__ ) A : Optional[int] = list(set(snake_case__ ) ) + list(snake_case__ ) # remove ".weight" from the keys A : List[Any] = ['''.weight''', '''.bias'''] A : Tuple = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: A : List[str] = name.replace(snake_case__ , '''''' ) filtered_module_names.append(snake_case__ ) return filtered_module_names
3
'''simple docstring''' import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowercase : Union[str, Any] = { 'text_branch': 'text_model', 'audio_branch': 'audio_model.audio_encoder', 'attn': 'attention.self', 'self.proj': 'output.dense', 'attention.self_mask': 'attn_mask', 'mlp.fc1': 'intermediate.dense', 'mlp.fc2': 'output.dense', 'norm1': 'layernorm_before', 'norm2': 'layernorm_after', 'bn0': 'batch_norm', } lowercase : Tuple = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc') def lowerCAmelCase_ ( snake_case__ , snake_case__=False ): '''simple docstring''' A, A : Tuple = create_model( '''HTSAT-tiny''' , '''roberta''' , snake_case__ , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=snake_case__ , fusion_type='''aff_2d''' if enable_fusion else None , ) return model, model_cfg def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Dict = {} A : str = R'''.*sequential.(\d+).*''' A : Union[str, Any] = R'''.*_projection.(\d+).*''' for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: A : Any = key.replace(snake_case__ , snake_case__ ) if re.match(snake_case__ , snake_case__ ): # replace sequential layers with list A : Any = re.match(snake_case__ , snake_case__ ).group(1 ) A : List[str] = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(snake_case__ )//3}.linear.' ) elif re.match(snake_case__ , snake_case__ ): A : Union[str, Any] = int(re.match(snake_case__ , snake_case__ ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... A : str = 1 if projecton_layer == 0 else 2 A : Optional[Any] = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' ) if "audio" and "qkv" in key: # split qkv into query key and value A : int = value A : List[Any] = mixed_qkv.size(0 ) // 3 A : Union[str, Any] = mixed_qkv[:qkv_dim] A : Optional[int] = mixed_qkv[qkv_dim : qkv_dim * 2] A : Optional[int] = mixed_qkv[qkv_dim * 2 :] A : Tuple = query_layer A : Union[str, Any] = key_layer A : Optional[int] = value_layer else: A : Dict = value return model_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' A, A : int = init_clap(snake_case__ , enable_fusion=snake_case__ ) clap_model.eval() A : str = clap_model.state_dict() A : Union[str, Any] = rename_state_dict(snake_case__ ) A : Tuple = ClapConfig() A : str = enable_fusion A : str = ClapModel(snake_case__ ) # ignore the spectrogram embedding layer model.load_state_dict(snake_case__ , strict=snake_case__ ) model.save_pretrained(snake_case__ ) transformers_config.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase : List[str] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not') lowercase : Tuple = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
3
1
'''simple docstring''' import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowercase : List[Any] = logging.get_logger(__name__) lowercase : Any = {'vocab_file': 'spiece.model'} lowercase : int = { 'vocab_file': { 'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model', } } lowercase : List[str] = { 'AI-Sweden/gpt-sw3-126m': 20_48, 'AI-Sweden/gpt-sw3-350m': 20_48, 'AI-Sweden/gpt-sw3-1.6b': 20_48, 'AI-Sweden/gpt-sw3-6.7b': 20_48, 'AI-Sweden/gpt-sw3-20b': 20_48, } class A ( __snake_case ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ['''input_ids''', '''attention_mask'''] def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None: """simple docstring""" A : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs A : Optional[int] = kwargs.get('''name_or_path''' ) if name_or_path is None: logger.warning( '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,''' ''' you are testing the model, this can safely be ignored''' ) A : List[Any] = '''None''' # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing A : int = '''<|endoftext|>''' if eos_token is None else eos_token A : Dict = '''<unk>''' if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: A : List[Any] = unk_token if pad_token is None else pad_token A : List[str] = eos_token if bos_token is None else bos_token else: A : Optional[Any] = '''<pad>''' if pad_token is None else pad_token A : Optional[int] = '''<s>''' if bos_token is None else bos_token super().__init__( do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , ) A : List[str] = do_lower_case A : List[str] = remove_space A : Union[str, Any] = keep_accents A : Union[str, Any] = vocab_file A : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(SCREAMING_SNAKE_CASE ) # Used for whitespace normalization in input texts # fmt : off A : Any = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', '''„'''} # fmt : on # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing A : List[str] = re.compile( F'[{"".join(map(SCREAMING_SNAKE_CASE , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]' ) def __getstate__( self ) -> Dict: """simple docstring""" A : Tuple = self.__dict__.copy() A : List[Any] = None return state def __setstate__( self , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" A : Tuple = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): A : Any = {} A : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def __lowerCAmelCase ( self ) -> int: """simple docstring""" return len(self.sp_model ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" A : List[str] = self.non_printing_characters_re.sub('''''' , SCREAMING_SNAKE_CASE ) # Normalize whitespaces A : Union[str, Any] = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] ) # NFC Unicode normalization A : Optional[int] = unicodedata.normalize('''NFC''' , SCREAMING_SNAKE_CASE ) return text def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : Optional[int] = self.preprocess_text(SCREAMING_SNAKE_CASE ) return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE ) @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" return out_string def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" A : Optional[int] = [] A : Optional[Any] = '''''' A : List[str] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE ) + token A : Dict = True A : int = [] else: current_sub_tokens.append(SCREAMING_SNAKE_CASE ) A : List[Any] = False out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE ) return out_string def __lowerCAmelCase ( self ) -> Dict[str, int]: """simple docstring""" A : Tuple = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A : Tuple = os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE , '''wb''' ) as fi: A : List[str] = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE ) return (out_vocab_file,) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]: """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : List[str] = self.preprocess_text(SCREAMING_SNAKE_CASE ) A : Any = self.sp_model.encode(SCREAMING_SNAKE_CASE ) else: A : Any = [self.preprocess_text(SCREAMING_SNAKE_CASE ) for t in text] A : Optional[int] = self.sp_model.encode(SCREAMING_SNAKE_CASE ) if return_tensors is True or return_tensors == "pt": A : Union[str, Any] = torch.tensor(SCREAMING_SNAKE_CASE ) return token_ids def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" return self.sp_model.decode(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[int]: """simple docstring""" A : int = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()] A : Tuple = ( F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(SCREAMING_SNAKE_CASE ) + F'{self.bos_token}Bot:' ) return self.encode(text=SCREAMING_SNAKE_CASE )
3
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging lowercase : Dict = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise if not is_sharded: A : Union[str, Any] = os.path.abspath(snake_case__ ) logger.info(F'Loading PyTorch weights from {pt_path}' ) A : Any = torch.load(snake_case__ , map_location='''cpu''' ) logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' ) A : List[str] = convert_pytorch_state_dict_to_flax(snake_case__ , snake_case__ ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files A : Any = convert_pytorch_sharded_state_dict_to_flax(snake_case__ , snake_case__ ) return flax_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' def is_key_or_prefix_key_in_dict(snake_case__ ) -> bool: return len(set(snake_case__ ) & {key, (model_prefix,) + key} ) > 0 # layer norm A : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean A : Tuple = pt_tuple_key[:-1] + ('''mean''',) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var A : Dict = pt_tuple_key[:-1] + ('''var''',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # embedding A : Any = pt_tuple_key[:-1] + ('''embedding''',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # conv layer A : Optional[Any] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(snake_case__ ): A : List[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer A : Optional[int] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(snake_case__ ): A : str = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight A : Dict = pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias A : List[Any] = pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 A : Dict = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): A : List[Any] = pt_tuple_key[-2] + '''_g''' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): A : List[str] = pt_tuple_key[-2] + '''_v''' if name is not None: A : int = pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = {k: v.numpy() for k, v in pt_state_dict.items()} A : int = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: A : List[str] = flax_model.params['''params'''] else: A : Dict = flax_model.params A : List[Any] = flatten_dict(snake_case__ ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : List[str] = flatten_dict(flax_model.params['''batch_stats'''] ) random_flax_state_dict.update(snake_case__ ) A : int = {} A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : int = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : str = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : Union[str, Any] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Any = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Dict = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : Any = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: A : Tuple = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : List[str] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch # Load the index A : Union[str, Any] = {} for shard_file in shard_filenames: # load using msgpack utils A : List[str] = torch.load(snake_case__ ) A : int = {k: v.numpy() for k, v in pt_state_dict.items()} A : Tuple = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : Optional[int] = flax_model.params['''params'''] A : List[Any] = flatten_dict(snake_case__ ) random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) ) else: A : Dict = flax_model.params A : Tuple = flatten_dict(snake_case__ ) A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : List[str] = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : int = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : List[str] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Union[str, Any] = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Any = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : int = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue if "var" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = os.path.abspath(snake_case__ ) logger.info(F'Loading Flax weights from {flax_checkpoint_path}' ) # import correct flax class A : List[str] = getattr(snake_case__ , '''Flax''' + model.__class__.__name__ ) # load flax weight dict with open(snake_case__ , '''rb''' ) as state_f: try: A : int = from_bytes(snake_case__ , state_f.read() ) except UnpicklingError: raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights A : List[str] = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values() if any(snake_case__ ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) A : Optional[Any] = jax.tree_util.tree_map( lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ ) A : Union[str, Any] = flatten_dict(snake_case__ ) A : List[Any] = pt_model.state_dict() A : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) A : Tuple = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys A : int = [] A : Any = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): A : Union[str, Any] = flax_key_tuple[0] == pt_model.base_model_prefix A : int = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: A : List[str] = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: A : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case__ ) not in pt_model_dict: # conv layer A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) A : Optional[int] = jnp.transpose(snake_case__ , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ) not in pt_model_dict: # linear layer A : Tuple = flax_key_tuple[:-1] + ('''weight''',) A : Tuple = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_mean''',) elif "var" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_var''',) if "batch_stats" in flax_state: A : List[Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: A : Union[str, Any] = '''.'''.join(snake_case__ ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. A : int = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: A : Optional[int] = key.split('''.''' ) A : Dict = None if key_components[-3::2] == ["parametrizations", "original0"]: A : List[str] = key_components[-2] + '''_g''' elif key_components[-3::2] == ["parametrizations", "original1"]: A : List[Any] = key_components[-2] + '''_v''' if name is not None: A : str = key_components[:-3] + [name] A : Optional[Any] = '''.'''.join(snake_case__ ) A : Optional[Any] = key if flax_key in special_pt_names: A : Optional[Any] = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ' F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) else: # add weight to pytorch dict A : Dict = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor A : Dict = torch.from_numpy(snake_case__ ) # remove from missing keys missing_keys.remove(snake_case__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(snake_case__ ) pt_model.load_state_dict(snake_case__ ) # re-transform missing_keys to list A : List[Any] = list(snake_case__ ) if len(snake_case__ ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) else: logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' ) if len(snake_case__ ) > 0: logger.warning( F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' ''' use it for predictions and inference.''' ) else: logger.warning( F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n' '''If your task is similar to the task the model of the checkpoint was trained on, ''' F'you can already use {pt_model.__class__.__name__} for predictions without further training.' ) return pt_model
3
1
'''simple docstring''' import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowercase : Union[str, Any] = logging.get_logger(__name__) lowercase : Union[str, Any] = {'vocab_file': 'vocab.txt'} lowercase : List[Any] = { 'vocab_file': { 'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt', }, } lowercase : Optional[int] = { 'openbmb/cpm-ant-10b': 10_24, } def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Any = collections.OrderedDict() with open(snake_case__ , '''r''' , encoding='''utf-8''' ) as reader: A : Dict = reader.readlines() for index, token in enumerate(snake_case__ ): A : str = token.rstrip('''\n''' ) A : Tuple = index return vocab class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE=200 ) -> Union[str, Any]: """simple docstring""" A : Union[str, Any] = vocab A : Union[str, Any] = unk_token A : Optional[Any] = max_input_chars_per_word def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" A : Optional[Any] = list(SCREAMING_SNAKE_CASE ) if len(SCREAMING_SNAKE_CASE ) > self.max_input_chars_per_word: return [self.unk_token] A : Union[str, Any] = 0 A : Optional[Any] = [] while start < len(SCREAMING_SNAKE_CASE ): A : Optional[Any] = len(SCREAMING_SNAKE_CASE ) A : int = None while start < end: A : Dict = ''''''.join(chars[start:end] ) if substr in self.vocab: A : List[Any] = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(SCREAMING_SNAKE_CASE ) A : List[Any] = end return sub_tokens class A ( __snake_case ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ['''input_ids''', '''attention_mask'''] __magic_name__ = False def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="<d>" , SCREAMING_SNAKE_CASE="</d>" , SCREAMING_SNAKE_CASE="<s>" , SCREAMING_SNAKE_CASE="</s>" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="</n>" , SCREAMING_SNAKE_CASE="</_>" , SCREAMING_SNAKE_CASE="left" , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: """simple docstring""" requires_backends(self , ['''jieba'''] ) super().__init__( bod_token=SCREAMING_SNAKE_CASE , eod_token=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , line_token=SCREAMING_SNAKE_CASE , space_token=SCREAMING_SNAKE_CASE , padding_side=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) A : Dict = bod_token A : List[Any] = eod_token A : Optional[Any] = load_vocab(SCREAMING_SNAKE_CASE ) A : int = self.encoder[space_token] A : str = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] A : Optional[Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda SCREAMING_SNAKE_CASE : x[1] ) ) A : Any = {v: k for k, v in self.encoder.items()} A : Optional[Any] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" return self.encoder[self.bod_token] @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return self.encoder[self.eod_token] @property def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" return self.encoder["\n"] @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return len(self.encoder ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" A : Optional[Any] = [] for x in jieba.cut(SCREAMING_SNAKE_CASE , cut_all=SCREAMING_SNAKE_CASE ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(SCREAMING_SNAKE_CASE ) ) return output_tokens def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" A : Optional[Any] = [i for i in token_ids if i >= 0] A : Dict = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" return token in self.encoder def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" return "".join(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" return self.encoder.get(SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" return self.decoder.get(SCREAMING_SNAKE_CASE , self.unk_token ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" if os.path.isdir(SCREAMING_SNAKE_CASE ): A : int = os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) else: A : List[Any] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory A : Optional[Any] = 0 if " " in self.encoder: A : List[str] = self.encoder[''' '''] del self.encoder[" "] if "\n" in self.encoder: A : Any = self.encoder['''\n'''] del self.encoder["\n"] A : Dict = collections.OrderedDict(sorted(self.encoder.items() , key=lambda SCREAMING_SNAKE_CASE : x[1] ) ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.' ''' Please check that the vocabulary is not corrupted!''' ) A : List[str] = token_index writer.write(token + '''\n''' ) index += 1 return (vocab_file,) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE ) if token_ids_a is not None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE )) return [1] + ([0] * len(SCREAMING_SNAKE_CASE ))
3
'''simple docstring''' import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint lowercase : Optional[int] = { '169M': 12, '430M': 24, '1B5': 24, '3B': 32, '7B': 32, '14B': 40, } lowercase : Optional[Any] = { '169M': 7_68, '430M': 10_24, '1B5': 20_48, '3B': 25_60, '7B': 40_96, '14B': 51_20, } def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Optional[Any] = list(state_dict.keys() ) for name in state_dict_keys: A : str = state_dict.pop(snake_case__ ) # emb -> embedding if name.startswith('''emb.''' ): A : Optional[Any] = name.replace('''emb.''' , '''embeddings.''' ) # ln_0 -> pre_ln (only present at block 0) if name.startswith('''blocks.0.ln0''' ): A : Union[str, Any] = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' ) # att -> attention A : int = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , snake_case__ ) # ffn -> feed_forward A : List[Any] = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , snake_case__ ) # time_mix_k -> time_mix_key and reshape if name.endswith('''.time_mix_k''' ): A : List[str] = name.replace('''.time_mix_k''' , '''.time_mix_key''' ) # time_mix_v -> time_mix_value and reshape if name.endswith('''.time_mix_v''' ): A : Union[str, Any] = name.replace('''.time_mix_v''' , '''.time_mix_value''' ) # time_mix_r -> time_mix_key and reshape if name.endswith('''.time_mix_r''' ): A : Union[str, Any] = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' ) if name != "head.weight": A : List[Any] = '''rwkv.''' + name A : Dict = weight return state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=False , snake_case__=None ): '''simple docstring''' if tokenizer_file is None: print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' ) A : int = 5_0277 A : Optional[int] = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' ) else: A : str = PreTrainedTokenizerFast(tokenizer_file=snake_case__ ) A : Any = len(snake_case__ ) tokenizer.save_pretrained(snake_case__ ) # 2. Build the config A : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: A : List[str] = candidate break if size is None: raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' ) if size not in possible_sizes: raise ValueError(F'`size` should be one of {possible_sizes}, got {size}.' ) A : Any = RwkvConfig( vocab_size=snake_case__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(snake_case__ ) # 3. Download model file then convert state_dict A : Union[str, Any] = hf_hub_download(snake_case__ , snake_case__ ) A : Tuple = torch.load(snake_case__ , map_location='''cpu''' ) A : List[Any] = convert_state_dict(snake_case__ ) # 4. Split in shards and save A, A : List[str] = shard_checkpoint(snake_case__ ) for shard_file, shard in shards.items(): torch.save(snake_case__ , os.path.join(snake_case__ , snake_case__ ) ) if index is not None: A : Dict = os.path.join(snake_case__ , snake_case__ ) # Save the index as well with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: A : List[Any] = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '''\n''' f.write(snake_case__ ) # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( '''Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.''' ) A : List[Any] = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: A : Union[str, Any] = torch.load(os.path.join(snake_case__ , snake_case__ ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(snake_case__ , snake_case__ ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' ) A : int = AutoModelForCausalLM.from_pretrained(snake_case__ ) model.push_to_hub(snake_case__ , max_shard_size='''2GB''' ) tokenizer.push_to_hub(snake_case__ ) if __name__ == "__main__": lowercase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.' ) parser.add_argument( '--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.' ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='Where to save the converted model.' ) parser.add_argument( '--tokenizer_file', default=None, type=str, help='Path to the tokenizer file to use (if not provided, only the model is converted).', ) parser.add_argument( '--size', default=None, type=str, help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Push to the Hub the converted model.', ) parser.add_argument( '--model_name', default=None, type=str, help='Name of the pushed model on the Hub, including the username / organization.', ) lowercase : Union[str, Any] = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
3
1
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers import is_speech_available, is_vision_available from transformers.testing_utils import require_torch if is_vision_available(): from transformers import TvltImageProcessor if is_speech_available(): from transformers import TvltFeatureExtractor from transformers import TvltProcessor @require_torch class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Optional[Any] = '''ZinengTang/tvlt-base''' A : List[Any] = tempfile.mkdtemp() def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" return TvltImageProcessor.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" return TvltFeatureExtractor.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Tuple = self.get_image_processor() A : Tuple = self.get_feature_extractor() A : Any = TvltProcessor(image_processor=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE ) processor.save_pretrained(self.tmpdirname ) A : List[Any] = TvltProcessor.from_pretrained(self.tmpdirname ) self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : str = self.get_image_processor() A : Optional[int] = self.get_feature_extractor() A : int = TvltProcessor(image_processor=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = np.ones([12000] ) A : List[Any] = feature_extractor(SCREAMING_SNAKE_CASE , return_tensors='''np''' ) A : Dict = processor(audio=SCREAMING_SNAKE_CASE , return_tensors='''np''' ) for key in audio_dict.keys(): self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Tuple = self.get_image_processor() A : int = self.get_feature_extractor() A : int = TvltProcessor(image_processor=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE ) A : int = np.ones([3, 224, 224] ) A : Optional[Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors='''np''' ) A : Dict = processor(images=SCREAMING_SNAKE_CASE , return_tensors='''np''' ) for key in image_dict.keys(): self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = self.get_image_processor() A : Union[str, Any] = self.get_feature_extractor() A : List[str] = TvltProcessor(image_processor=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE ) A : Dict = np.ones([12000] ) A : Tuple = np.ones([3, 224, 224] ) A : Optional[Any] = processor(audio=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE ) self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE ): processor() def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : List[Any] = self.get_image_processor() A : str = self.get_feature_extractor() A : Optional[int] = TvltProcessor(image_processor=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE ) self.assertListEqual( processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
3
'''simple docstring''' from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING lowercase : str = logging.get_logger(__name__) @add_end_docstrings(__snake_case ) class A ( __snake_case ): def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) self.check_model_type(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A, A : Dict = {}, {} if padding is not None: A : List[str] = padding if truncation is not None: A : Dict = truncation if top_k is not None: A : Optional[Any] = top_k return preprocess_params, {}, postprocess_params def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE , (Image.Image, str) ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : int = {'''image''': image, '''question''': question} else: A : Any = image A : Any = super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) return results def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" A : Union[str, Any] = load_image(inputs['''image'''] ) A : Optional[Any] = self.tokenizer( inputs['''question'''] , return_tensors=self.framework , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE ) A : Dict = self.image_processor(images=SCREAMING_SNAKE_CASE , return_tensors=self.framework ) model_inputs.update(SCREAMING_SNAKE_CASE ) return model_inputs def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : List[Any] = self.model(**SCREAMING_SNAKE_CASE ) return model_outputs def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=5 ) -> int: """simple docstring""" if top_k > self.model.config.num_labels: A : Dict = self.model.config.num_labels if self.framework == "pt": A : Optional[int] = model_outputs.logits.sigmoid()[0] A, A : int = probs.topk(SCREAMING_SNAKE_CASE ) else: raise ValueError(F'Unsupported framework: {self.framework}' ) A : int = scores.tolist() A : List[str] = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )]
3
1
'''simple docstring''' import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() lowercase : List[str] = logging.get_logger(__name__) lowercase : str = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'encoder.layer_norm_for_extract': 'layer_norm_for_extract', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'label_embs_concat': 'label_embeddings_concat', 'mask_emb': 'masked_spec_embed', 'spk_proj': 'speaker_proj', } lowercase : List[str] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'label_embeddings_concat', 'speaker_proj', 'layer_norm_for_extract', ] def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' for attribute in key.split('''.''' ): A : List[str] = getattr(snake_case__ , snake_case__ ) if weight_type is not None: A : Optional[Any] = getattr(snake_case__ , snake_case__ ).shape else: A : List[Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' F' {value.shape} for {full_name}' ) if weight_type == "weight": A : List[str] = value elif weight_type == "weight_g": A : Tuple = value elif weight_type == "weight_v": A : int = value elif weight_type == "bias": A : int = value else: A : List[str] = value logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : List[Any] = [] A : int = fairseq_model.state_dict() A : List[str] = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): A : Optional[int] = False if "conv_layers" in name: load_conv_layer( snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == '''group''' , ) A : Union[str, Any] = True else: for key, mapped_key in MAPPING.items(): A : Optional[Any] = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key): # special case since naming is very similar continue A : List[Any] = True if "*" in mapped_key: A : List[Any] = name.split(snake_case__ )[0].split('''.''' )[-2] A : Any = mapped_key.replace('''*''' , snake_case__ ) if "weight_g" in name: A : str = '''weight_g''' elif "weight_v" in name: A : Dict = '''weight_v''' elif "bias" in name: A : Union[str, Any] = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj A : Optional[Any] = '''weight''' else: A : Any = None set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) continue if not is_used: unused_weights.append(snake_case__ ) logger.warning(F'Unused weights: {unused_weights}' ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : int = full_name.split('''conv_layers.''' )[-1] A : Any = name.split('''.''' ) A : Dict = int(items[0] ) A : str = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) A : Any = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) A : str = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.' ) A : List[Any] = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) A : List[str] = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) else: unused_weights.append(snake_case__ ) @torch.no_grad() def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=True ): '''simple docstring''' if config_path is not None: A : List[str] = UniSpeechSatConfig.from_pretrained(snake_case__ ) else: A : Optional[int] = UniSpeechSatConfig() A : int = '''''' if is_finetuned: A : List[str] = UniSpeechSatForCTC(snake_case__ ) else: A : int = UniSpeechSatForPreTraining(snake_case__ ) A, A, A : str = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) A : List[str] = model[0].eval() recursively_load_weights(snake_case__ , snake_case__ ) hf_wavavec.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) lowercase : str = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
3
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase : Union[str, Any] = logging.get_logger(__name__) lowercase : str = { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json', 'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json', 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json', 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json', 'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json', 'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json', 'cl-tohoku/bert-base-japanese-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json' ), 'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json', # See all BERT models at https://huggingface.co/models?filter=bert } class A ( __snake_case ): __magic_name__ = '''bert''' def __init__( self , SCREAMING_SNAKE_CASE=30522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Optional[int]: """simple docstring""" super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = vocab_size A : Optional[Any] = hidden_size A : List[Any] = num_hidden_layers A : List[str] = num_attention_heads A : Dict = hidden_act A : Optional[Any] = intermediate_size A : List[Any] = hidden_dropout_prob A : List[Any] = attention_probs_dropout_prob A : Optional[Any] = max_position_embeddings A : List[str] = type_vocab_size A : Dict = initializer_range A : str = layer_norm_eps A : int = position_embedding_type A : Dict = use_cache A : str = classifier_dropout class A ( __snake_case ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": A : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: A : Optional[int] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
3
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : List[Any] = logging.get_logger(__name__) lowercase : List[str] = { 'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json', } class A ( __snake_case ): __magic_name__ = '''timesformer''' def __init__( self , SCREAMING_SNAKE_CASE=224 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=8 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE="divided_space_time" , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) A : List[Any] = image_size A : Any = patch_size A : List[str] = num_channels A : Tuple = num_frames A : Union[str, Any] = hidden_size A : Tuple = num_hidden_layers A : Any = num_attention_heads A : Optional[int] = intermediate_size A : List[Any] = hidden_act A : Optional[Any] = hidden_dropout_prob A : List[str] = attention_probs_dropout_prob A : Any = initializer_range A : Optional[int] = layer_norm_eps A : Optional[int] = qkv_bias A : List[Any] = attention_type A : Optional[Any] = drop_path_rate
3
'''simple docstring''' import requests from bsa import BeautifulSoup def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : str = BeautifulSoup(requests.get(snake_case__ , params=snake_case__ ).content , '''html.parser''' ) A : Dict = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} ) A : Optional[int] = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' ) return anchors[2].get_text() if __name__ == "__main__": lowercase : str = { 'title': ( 'Precisely geometry controlled microsupercapacitors for ultrahigh areal ' 'capacitance, volumetric capacitance, and energy density' ), 'journal': 'Chem. Mater.', 'volume': 30, 'pages': '3979-3990', 'year': 20_18, 'hl': 'en', } print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
3
1
import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig UpperCAmelCase__ = logging.get_logger(__name__) # General docstring UpperCAmelCase__ = "PoolFormerConfig" # Base docstring UpperCAmelCase__ = "sail/poolformer_s12" UpperCAmelCase__ = [1, 512, 7, 7] # Image classification docstring UpperCAmelCase__ = "sail/poolformer_s12" UpperCAmelCase__ = "tabby, tabby cat" UpperCAmelCase__ = [ "sail/poolformer_s12", # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def _a ( a :Dict , a :float = 0.0 , a :bool = False ) -> Optional[Any]: if drop_prob == 0.0 or not training: return input a = 1 - drop_prob a = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets a = keep_prob + torch.rand(a , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize a = input.div(a ) * random_tensor return output class lowercase_ ( nn.Module ): '''simple docstring''' def __init__( self : int , __UpperCAmelCase : Optional[float] = None ) ->None: """simple docstring""" super().__init__() a = drop_prob def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : torch.Tensor ) ->torch.Tensor: """simple docstring""" return drop_path(__UpperCAmelCase , self.drop_prob , self.training ) def __lowerCAmelCase ( self : int ) ->str: """simple docstring""" return "p={}".format(self.drop_prob ) class lowercase_ ( nn.Module ): '''simple docstring''' def __init__( self : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : Any=None ) ->int: """simple docstring""" super().__init__() a = patch_size if isinstance(__UpperCAmelCase , collections.abc.Iterable ) else (patch_size, patch_size) a = stride if isinstance(__UpperCAmelCase , collections.abc.Iterable ) else (stride, stride) a = padding if isinstance(__UpperCAmelCase , collections.abc.Iterable ) else (padding, padding) a = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , kernel_size=__UpperCAmelCase , stride=__UpperCAmelCase , padding=__UpperCAmelCase ) a = norm_layer(__UpperCAmelCase ) if norm_layer else nn.Identity() def __lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] ) ->List[str]: """simple docstring""" a = self.projection(__UpperCAmelCase ) a = self.norm(__UpperCAmelCase ) return embeddings class lowercase_ ( nn.GroupNorm ): '''simple docstring''' def __init__( self : List[Any] , __UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Union[str, Any] ) ->Dict: """simple docstring""" super().__init__(1 , __UpperCAmelCase , **__UpperCAmelCase ) class lowercase_ ( nn.Module ): '''simple docstring''' def __init__( self : int , __UpperCAmelCase : Optional[Any] ) ->List[Any]: """simple docstring""" super().__init__() a = nn.AvgPoolad(__UpperCAmelCase , stride=1 , padding=pool_size // 2 , count_include_pad=__UpperCAmelCase ) def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : List[str] ) ->Union[str, Any]: """simple docstring""" return self.pool(__UpperCAmelCase ) - hidden_states class lowercase_ ( nn.Module ): '''simple docstring''' def __init__( self : Dict , __UpperCAmelCase : Dict , __UpperCAmelCase : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : Dict ) ->List[Any]: """simple docstring""" super().__init__() a = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , 1 ) a = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , 1 ) a = PoolFormerDropPath(__UpperCAmelCase ) if isinstance(config.hidden_act , __UpperCAmelCase ): a = ACTaFN[config.hidden_act] else: a = config.hidden_act def __lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] ) ->List[Any]: """simple docstring""" a = self.conva(__UpperCAmelCase ) a = self.act_fn(__UpperCAmelCase ) a = self.drop(__UpperCAmelCase ) a = self.conva(__UpperCAmelCase ) a = self.drop(__UpperCAmelCase ) return hidden_states class lowercase_ ( nn.Module ): '''simple docstring''' def __init__( self : int , __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple ) ->int: """simple docstring""" super().__init__() a = PoolFormerPooling(__UpperCAmelCase ) a = PoolFormerOutput(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) a = PoolFormerGroupNorm(__UpperCAmelCase ) a = PoolFormerGroupNorm(__UpperCAmelCase ) # Useful for training neural nets a = PoolFormerDropPath(__UpperCAmelCase ) if drop_path > 0.0 else nn.Identity() a = config.use_layer_scale if config.use_layer_scale: a = nn.Parameter( config.layer_scale_init_value * torch.ones((__UpperCAmelCase) ) , requires_grad=__UpperCAmelCase ) a = nn.Parameter( config.layer_scale_init_value * torch.ones((__UpperCAmelCase) ) , requires_grad=__UpperCAmelCase ) def __lowerCAmelCase ( self : Any , __UpperCAmelCase : Dict ) ->Any: """simple docstring""" if self.use_layer_scale: a = self.pooling(self.before_norm(__UpperCAmelCase ) ) a = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection a = hidden_states + self.drop_path(__UpperCAmelCase ) a = () a = self.output(self.after_norm(__UpperCAmelCase ) ) a = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection a = hidden_states + self.drop_path(__UpperCAmelCase ) a = (output,) + outputs return outputs else: a = self.drop_path(self.pooling(self.before_norm(__UpperCAmelCase ) ) ) # First residual connection a = pooling_output + hidden_states a = () # Second residual connection inside the PoolFormerOutput block a = self.drop_path(self.output(self.after_norm(__UpperCAmelCase ) ) ) a = hidden_states + layer_output a = (output,) + outputs return outputs class lowercase_ ( nn.Module ): '''simple docstring''' def __init__( self : List[Any] , __UpperCAmelCase : Optional[int] ) ->Dict: """simple docstring""" super().__init__() a = config # stochastic depth decay rule a = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings a = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) a = nn.ModuleList(__UpperCAmelCase ) # Transformer blocks a = [] a = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers a = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( __UpperCAmelCase , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(__UpperCAmelCase ) ) a = nn.ModuleList(__UpperCAmelCase ) def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : Tuple=True ) ->Union[str, Any]: """simple docstring""" a = () if output_hidden_states else None a = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): a , a = layers # Get patch embeddings from hidden_states a = embedding_layer(__UpperCAmelCase ) # Send the embeddings through the blocks for _, blk in enumerate(__UpperCAmelCase ): a = blk(__UpperCAmelCase ) a = layer_outputs[0] if output_hidden_states: a = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=__UpperCAmelCase , hidden_states=__UpperCAmelCase ) class lowercase_ ( lowercase ): '''simple docstring''' __snake_case = PoolFormerConfig __snake_case = '''poolformer''' __snake_case = '''pixel_values''' __snake_case = True def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : List[str] ) ->Optional[Any]: """simple docstring""" if isinstance(__UpperCAmelCase , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__UpperCAmelCase , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any]=False ) ->int: """simple docstring""" if isinstance(__UpperCAmelCase , __UpperCAmelCase ): a = value UpperCAmelCase__ = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n" UpperCAmelCase__ = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n" @add_start_docstrings( '''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , lowercase , ) class lowercase_ ( lowercase ): '''simple docstring''' def __init__( self : str , __UpperCAmelCase : int ) ->Optional[Any]: """simple docstring""" super().__init__(__UpperCAmelCase ) a = config a = PoolFormerEncoder(__UpperCAmelCase ) # Initialize weights and apply final processing self.post_init() def __lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]: """simple docstring""" return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(__UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __lowerCAmelCase ( self : str , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[bool] = None , ) ->Union[Tuple, BaseModelOutputWithNoAttention]: """simple docstring""" a = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) a = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('''You have to specify pixel_values''' ) a = self.encoder( __UpperCAmelCase , output_hidden_states=__UpperCAmelCase , return_dict=__UpperCAmelCase , ) a = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=__UpperCAmelCase , hidden_states=encoder_outputs.hidden_states , ) class lowercase_ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[int] , __UpperCAmelCase : Dict ) ->List[str]: """simple docstring""" super().__init__() a = nn.Linear(config.hidden_size , config.hidden_size ) def __lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[Any] ) ->Union[str, Any]: """simple docstring""" a = self.dense(__UpperCAmelCase ) return output @add_start_docstrings( ''' PoolFormer Model transformer with an image classification head on top ''' , lowercase , ) class lowercase_ ( lowercase ): '''simple docstring''' def __init__( self : Union[str, Any] , __UpperCAmelCase : List[str] ) ->Any: """simple docstring""" super().__init__(__UpperCAmelCase ) a = config.num_labels a = PoolFormerModel(__UpperCAmelCase ) # Final norm a = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head a = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[torch.LongTensor] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[bool] = None , ) ->Union[Tuple, ImageClassifierOutputWithNoAttention]: """simple docstring""" a = return_dict if return_dict is not None else self.config.use_return_dict a = self.poolformer( __UpperCAmelCase , output_hidden_states=__UpperCAmelCase , return_dict=__UpperCAmelCase , ) a = outputs[0] a = self.classifier(self.norm(__UpperCAmelCase ).mean([-2, -1] ) ) a = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: a = '''regression''' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): a = '''single_label_classification''' else: a = '''multi_label_classification''' if self.config.problem_type == "regression": a = MSELoss() if self.num_labels == 1: a = loss_fct(logits.squeeze() , labels.squeeze() ) else: a = loss_fct(__UpperCAmelCase , __UpperCAmelCase ) elif self.config.problem_type == "single_label_classification": a = CrossEntropyLoss() a = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": a = BCEWithLogitsLoss() a = loss_fct(__UpperCAmelCase , __UpperCAmelCase ) if not return_dict: a = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=__UpperCAmelCase , logits=__UpperCAmelCase , hidden_states=outputs.hidden_states )
0
'''simple docstring''' class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Any = None A : Optional[Any] = None A : Tuple = graph self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Dict = len(SCREAMING_SNAKE_CASE ) A : Optional[Any] = None def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" if sources is int: A : Dict = [sources] if sinks is int: A : str = [sinks] if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0: return A : Optional[int] = sources[0] A : Union[str, Any] = sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1: A : Optional[int] = 0 for i in sources: max_input_flow += sum(self.graph[i] ) A : Dict = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: A : Dict = max_input_flow A : Tuple = 0 A : Tuple = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: A : Optional[Any] = max_input_flow A : Optional[Any] = size - 1 def __lowerCAmelCase ( self ) -> Any: """simple docstring""" if self.maximum_flow_algorithm is None: raise Exception('''You need to set maximum flow algorithm before.''' ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : List[Any] = algorithm(self ) class A : def __init__( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Union[str, Any] = flow_network A : Optional[Any] = flow_network.verticesCount A : Tuple = flow_network.sourceIndex A : Dict = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that A : str = flow_network.graph A : Optional[Any] = False def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" if not self.executed: self._algorithm() A : Optional[int] = True def __lowerCAmelCase ( self ) -> Any: """simple docstring""" pass class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE ) # use this to save your result A : List[str] = -1 def __lowerCAmelCase ( self ) -> str: """simple docstring""" if not self.executed: raise Exception('''You should execute algorithm before using its result!''' ) return self.maximum_flow class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE ) A : Optional[Any] = [[0] * self.verticies_count for i in range(self.verticies_count )] A : Union[str, Any] = [0] * self.verticies_count A : List[Any] = [0] * self.verticies_count def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Tuple = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule A : Optional[Any] = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list A : Union[str, Any] = 0 while i < len(SCREAMING_SNAKE_CASE ): A : str = vertices_list[i] A : List[str] = self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) ) A : int = 0 else: i += 1 A : Optional[Any] = sum(self.preflow[self.source_index] ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.relabel(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Dict = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : Dict = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): A : Dict = self.heights[to_index] if min_height is not None: A : Dict = min_height + 1 if __name__ == "__main__": lowercase : Optional[int] = [0] lowercase : List[Any] = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] lowercase : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network lowercase : List[str] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate lowercase : List[str] = flow_network.find_maximum_flow() print(f'''maximum flow is {maximum_flow}''')
3
0
'''simple docstring''' SCREAMING_SNAKE_CASE_: str =2_56 # Modulus to hash a string SCREAMING_SNAKE_CASE_: int =1_00_00_03 def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : str ) -> bool: '''simple docstring''' UpperCAmelCase_ = len(snake_case_ ) UpperCAmelCase_ = len(snake_case_ ) if p_len > t_len: return False UpperCAmelCase_ = 0 UpperCAmelCase_ = 0 UpperCAmelCase_ = 1 # Calculating the hash of pattern and substring of text for i in range(snake_case_ ): UpperCAmelCase_ = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus UpperCAmelCase_ = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue UpperCAmelCase_ = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash UpperCAmelCase_ = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def lowerCAmelCase_ ( ) -> None: '''simple docstring''' UpperCAmelCase_ = "abc1abc12" UpperCAmelCase_ = "alskfjaldsabc1abc1abc12k23adsfabcabc" UpperCAmelCase_ = "alskfjaldsk23adsfabcabc" assert rabin_karp(snake_case_ , snake_case_ ) and not rabin_karp(snake_case_ , snake_case_ ) # Test 2) UpperCAmelCase_ = "ABABX" UpperCAmelCase_ = "ABABZABABYABABX" assert rabin_karp(snake_case_ , snake_case_ ) # Test 3) UpperCAmelCase_ = "AAAB" UpperCAmelCase_ = "ABAAAAAB" assert rabin_karp(snake_case_ , snake_case_ ) # Test 4) UpperCAmelCase_ = "abcdabcy" UpperCAmelCase_ = "abcxabcdabxabcdabcdabcy" assert rabin_karp(snake_case_ , snake_case_ ) # Test 5) UpperCAmelCase_ = "Lü" UpperCAmelCase_ = "Lüsai" assert rabin_karp(snake_case_ , snake_case_ ) UpperCAmelCase_ = "Lue" assert not rabin_karp(snake_case_ , snake_case_ ) print("Success." ) if __name__ == "__main__": test_rabin_karp()
1
'''simple docstring''' def lowerCAmelCase_ ( snake_case__ = 10 ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ) or n < 0: raise ValueError('''Invalid input''' ) A : List[str] = 10**n A : Tuple = 2_8433 * (pow(2 , 783_0457 , snake_case__ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(f'''{solution(10) = }''')
3
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import _LazyModule lowerCamelCase : Tuple = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys lowerCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
2
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging lowercase : List[str] = logging.get_logger(__name__) lowercase : str = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class A ( __snake_case ): __magic_name__ = '''gpt_neo''' __magic_name__ = ['''past_key_values'''] __magic_name__ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , SCREAMING_SNAKE_CASE=50257 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=24 , SCREAMING_SNAKE_CASE=[[["global", "local"], 12]] , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=50256 , SCREAMING_SNAKE_CASE=50256 , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" A : Union[str, Any] = vocab_size A : Optional[Any] = max_position_embeddings A : Dict = hidden_size A : Optional[Any] = num_layers A : Tuple = num_heads A : int = intermediate_size A : Optional[Any] = window_size A : List[Any] = activation_function A : Union[str, Any] = resid_dropout A : Any = embed_dropout A : List[Any] = attention_dropout A : str = classifier_dropout A : List[Any] = layer_norm_epsilon A : str = initializer_range A : List[str] = use_cache A : Optional[int] = bos_token_id A : List[Any] = eos_token_id A : int = attention_types A : int = self.expand_attention_types_params(SCREAMING_SNAKE_CASE ) if len(self.attention_layers ) != self.num_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.attention_layers)` == `config.num_layers` ''' F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, ' F'`config.num_layers = {self.num_layers}`. ' '''`config.attention_layers` is prepared using `config.attention_types`. ''' '''Please verify the value of `config.attention_types` argument.''' ) super().__init__(bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : List[str] = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' import torch A : Tuple = input.size() A : Union[str, Any] = len(snake_case__ ) A : List[str] = shape[dimension] A : Union[str, Any] = torch.arange(0 , snake_case__ , snake_case__ ) A : List[str] = torch.div(sizedim - size , snake_case__ , rounding_mode='''floor''' ) + 1 A : Optional[int] = torch.arange(snake_case__ ) + low_indices[:min_length][:, None] A : str = [slice(snake_case__ )] * rank A : List[Any] = indices A : Union[str, Any] = input[s] A : List[str] = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch A : List[str] = torch.arange(1 , snake_case__ ) A : Optional[int] = torch.remainder(snake_case__ , snake_case__ ) A : Optional[int] = remainders == 0 A : Optional[Any] = candidates[divisor_indices] A : Optional[int] = torch.max(snake_case__ ) return largest_divisor, torch.div(snake_case__ , snake_case__ , rounding_mode='''floor''' ) class A ( __snake_case ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" A : Tuple = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE , direction='''inputs''' ) A : Optional[Any] = {0: '''batch''', 1: '''past_sequence + sequence'''} else: A : Dict = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return self._config.num_heads def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]: """simple docstring""" A : List[str] = super(SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs( SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE ) # We need to order the input in the way they appears in the forward() A : Any = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch A, A : Dict = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values A : str = seqlen + 2 A : List[Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) A : Any = [ (torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers ) ] A : str = common_inputs['''attention_mask'''] if self.use_past: A : Optional[int] = ordered_inputs['''attention_mask'''].dtype A : List[str] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )] , dim=1 ) return ordered_inputs @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return 13
3
0
'''simple docstring''' def a_ ( lowerCamelCase : list ): if len(lowerCamelCase ) <= 1: return lst lowerCAmelCase = 1 while i < len(lowerCamelCase ): if lst[i - 1] <= lst[i]: i += 1 else: lowerCAmelCase , lowerCAmelCase = lst[i], lst[i - 1] i -= 1 if i == 0: lowerCAmelCase = 1 return lst if __name__ == "__main__": __snake_case =input("""Enter numbers separated by a comma:\n""").strip() __snake_case =[int(item) for item in user_input.split(""",""")] print(gnome_sort(unsorted))
4
'''simple docstring''' import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Union[str, Any] = [] A : Union[str, Any] = [] for i in range(self.num_layers ): A : Any = self.in_channels if i == 0 else self.out_channels A : Optional[Any] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Optional[int] = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnets A : Union[str, Any] = attentions if self.add_downsample: A : int = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Union[str, Any]: """simple docstring""" A : Optional[Any] = () for resnet, attn in zip(self.resnets , self.attentions ): A : int = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Dict = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[Any] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Optional[Any] = [] for i in range(self.num_layers ): A : Optional[Any] = self.in_channels if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets if self.add_downsample: A : Dict = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]: """simple docstring""" A : str = () for resnet in self.resnets: A : Optional[int] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[int] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[Any] = [] A : Optional[int] = [] for i in range(self.num_layers ): A : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : Dict = self.prev_output_channel if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : int = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets A : Optional[Any] = attentions if self.add_upsample: A : Optional[int] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[int]: """simple docstring""" for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states A : List[str] = res_hidden_states_tuple[-1] A : int = res_hidden_states_tuple[:-1] A : List[str] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Tuple = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : Dict = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : int = [] for i in range(self.num_layers ): A : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : List[str] = self.prev_output_channel if i == 0 else self.out_channels A : str = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[Any] = resnets if self.add_upsample: A : Optional[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Tuple: """simple docstring""" for resnet in self.resnets: # pop res hidden states A : Optional[int] = res_hidden_states_tuple[-1] A : Optional[Any] = res_hidden_states_tuple[:-1] A : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Optional[Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : List[str] = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : str = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] A : List[Any] = [] for _ in range(self.num_layers ): A : int = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[str] = resnets A : List[str] = attentions def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Dict: """simple docstring""" A : Optional[Any] = self.resnets[0](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): A : Optional[int] = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) return hidden_states
3
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase__ = { '''configuration_time_series_transformer''': [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimeSeriesTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimeSeriesTransformerForPrediction''', '''TimeSeriesTransformerModel''', '''TimeSeriesTransformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
5
'''simple docstring''' import os def lowerCAmelCase_ ( ): '''simple docstring''' A : List[Any] = os.path.join(os.path.dirname(snake_case__ ) , '''num.txt''' ) with open(snake_case__ ) as file_hand: return str(sum(int(snake_case__ ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
3
0
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging A : Dict = logging.get_logger(__name__) class __A( a ): snake_case_ = '''encoder-decoder''' snake_case_ = True def __init__( self , **_snake_case ) -> str: '''simple docstring''' super().__init__(**_snake_case ) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" __a = kwargs.pop('''encoder''' ) __a = encoder_config.pop('''model_type''' ) __a = kwargs.pop('''decoder''' ) __a = decoder_config.pop('''model_type''' ) from ..auto.configuration_auto import AutoConfig __a = AutoConfig.for_model(_snake_case , **_snake_case ) __a = AutoConfig.for_model(_snake_case , **_snake_case ) __a = True @classmethod def SCREAMING_SNAKE_CASE_ ( cls , _snake_case , _snake_case , **_snake_case ) -> PretrainedConfig: '''simple docstring''' logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' ) __a = True __a = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_snake_case ) def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]: '''simple docstring''' __a = copy.deepcopy(self.__dict__ ) __a = self.encoder.to_dict() __a = self.decoder.to_dict() __a = self.__class__.model_type return output
6
'''simple docstring''' import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] ) @pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , '''IN_MEMORY_MAX_SIZE''' , snake_case__ ) A : Dict = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: A : Dict = dataset_size < in_memory_max_size else: A : Tuple = False A : int = is_small_dataset(snake_case__ ) assert result == expected
3
0
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING lowercase_ = logging.get_logger(__name__) lowercase_ = Dict[str, Any] lowercase_ = List[Prediction] @add_end_docstrings(_UpperCAmelCase ) class A ( _UpperCAmelCase ): """simple docstring""" def __init__( self : Optional[int],*lowercase_ : Union[str, Any],**lowercase_ : Tuple )-> int: '''simple docstring''' super().__init__(*lowercase_,**lowercase_ ) if self.framework == "tf": raise ValueError(F'The {self.__class__} is only available in PyTorch.' ) requires_backends(self,'vision' ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def snake_case__ ( self : Union[str, Any],**lowercase_ : int )-> Any: '''simple docstring''' A__ = {} if "threshold" in kwargs: A__ = kwargs['threshold'] return {}, {}, postprocess_kwargs def __call__( self : List[str],*lowercase_ : int,**lowercase_ : Union[str, Any] )-> Union[Predictions, List[Prediction]]: '''simple docstring''' return super().__call__(*lowercase_,**lowercase_ ) def snake_case__ ( self : Tuple,lowercase_ : Any )-> List[Any]: '''simple docstring''' A__ = load_image(lowercase_ ) A__ = torch.IntTensor([[image.height, image.width]] ) A__ = self.image_processor(images=[image],return_tensors='pt' ) if self.tokenizer is not None: A__ = self.tokenizer(text=inputs['words'],boxes=inputs['boxes'],return_tensors='pt' ) A__ = target_size return inputs def snake_case__ ( self : Dict,lowercase_ : int )-> str: '''simple docstring''' A__ = model_inputs.pop('target_size' ) A__ = self.model(**lowercase_ ) A__ = outputs.__class__({'target_size': target_size, **outputs} ) if self.tokenizer is not None: A__ = model_inputs['bbox'] return model_outputs def snake_case__ ( self : List[str],lowercase_ : Tuple,lowercase_ : Union[str, Any]=0.9 )-> List[str]: '''simple docstring''' A__ = model_outputs['target_size'] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. A__ , A__ = target_size[0].tolist() def unnormalize(lowercase_ : List[str] ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1_0_0_0), (height * bbox[1] / 1_0_0_0), (width * bbox[2] / 1_0_0_0), (height * bbox[3] / 1_0_0_0), ] ) ) A__ , A__ = model_outputs['logits'].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) A__ = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] A__ = [unnormalize(lowercase_ ) for bbox in model_outputs['bbox'].squeeze(0 )] A__ = ['score', 'label', 'box'] A__ = [dict(zip(lowercase_,lowercase_ ) ) for vals in zip(scores.tolist(),lowercase_,lowercase_ ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel A__ = self.image_processor.post_process_object_detection(lowercase_,lowercase_,lowercase_ ) A__ = raw_annotations[0] A__ = raw_annotation['scores'] A__ = raw_annotation['labels'] A__ = raw_annotation['boxes'] A__ = scores.tolist() A__ = [self.model.config.idalabel[label.item()] for label in labels] A__ = [self._get_bounding_box(lowercase_ ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] A__ = ['score', 'label', 'box'] A__ = [ dict(zip(lowercase_,lowercase_ ) ) for vals in zip(raw_annotation['scores'],raw_annotation['labels'],raw_annotation['boxes'] ) ] return annotation def snake_case__ ( self : Optional[int],lowercase_ : "torch.Tensor" )-> Dict[str, int]: '''simple docstring''' if self.framework != "pt": raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' ) A__ , A__ , A__ , A__ = box.int().tolist() A__ = { 'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax, } return bbox
7
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class A ( __snake_case ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
3
0
import json import os import torch from diffusers import UNetaDModel os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True) os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True) os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): if hor == 128: snake_case_ = ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''') snake_case_ = (32, 128, 256) snake_case_ = ('''UpResnetBlock1D''', '''UpResnetBlock1D''') elif hor == 32: snake_case_ = ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''') snake_case_ = (32, 64, 128, 256) snake_case_ = ('''UpResnetBlock1D''', '''UpResnetBlock1D''', '''UpResnetBlock1D''') snake_case_ = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' ) snake_case_ = model.state_dict() snake_case_ = { '''down_block_types''': down_block_types, '''block_out_channels''': block_out_channels, '''up_block_types''': up_block_types, '''layers_per_block''': 1, '''use_timestep_embedding''': True, '''out_block_type''': '''OutConv1DBlock''', '''norm_num_groups''': 8, '''downsample_each_block''': False, '''in_channels''': 14, '''out_channels''': 14, '''extra_in_channels''': 0, '''time_embedding_type''': '''positional''', '''flip_sin_to_cos''': False, '''freq_shift''': 1, '''sample_size''': 65536, '''mid_block_type''': '''MidResTemporalBlock1D''', '''act_fn''': '''mish''', } snake_case_ = UNetaDModel(**SCREAMING_SNAKE_CASE__ ) print(F'''length of state dict: {len(state_dict.keys() )}''' ) print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) snake_case_ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): snake_case_ = state_dict.pop(SCREAMING_SNAKE_CASE__ ) hf_value_function.load_state_dict(SCREAMING_SNAKE_CASE__ ) torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' ) with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , '''w''' ) as f: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __SCREAMING_SNAKE_CASE (): snake_case_ = { '''in_channels''': 14, '''down_block_types''': ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D'''), '''up_block_types''': (), '''out_block_type''': '''ValueFunction''', '''mid_block_type''': '''ValueFunctionMidBlock1D''', '''block_out_channels''': (32, 64, 128, 256), '''layers_per_block''': 1, '''downsample_each_block''': True, '''sample_size''': 65536, '''out_channels''': 14, '''extra_in_channels''': 0, '''time_embedding_type''': '''positional''', '''use_timestep_embedding''': True, '''flip_sin_to_cos''': False, '''freq_shift''': 1, '''norm_num_groups''': 8, '''act_fn''': '''mish''', } snake_case_ = torch.load('''/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch''' ) snake_case_ = model snake_case_ = UNetaDModel(**SCREAMING_SNAKE_CASE__ ) print(F'''length of state dict: {len(state_dict.keys() )}''' ) print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) snake_case_ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): snake_case_ = state_dict.pop(SCREAMING_SNAKE_CASE__ ) hf_value_function.load_state_dict(SCREAMING_SNAKE_CASE__ ) torch.save(hf_value_function.state_dict() , '''hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin''' ) with open('''hub/hopper-medium-v2/value_function/config.json''' , '''w''' ) as f: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": unet(32) # unet(128) value_function()
8
'''simple docstring''' from scipy.stats import pearsonr import datasets lowercase : Optional[int] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n' lowercase : Optional[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n' lowercase : str = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: """simple docstring""" if return_pvalue: A : Union[str, Any] = pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )}
3
0
from math import sqrt def _UpperCamelCase ( lowercase__ ): assert isinstance(lowercase__ , lowercase__ ) and ( number >= 0 ), "'number' must been an int and positive" __SCREAMING_SNAKE_CASE : str = True # 0 and 1 are none primes. if number <= 1: __SCREAMING_SNAKE_CASE : Any = False for divisor in range(2 , int(round(sqrt(lowercase__ ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: __SCREAMING_SNAKE_CASE : List[Any] = False break # precondition assert isinstance(lowercase__ , lowercase__ ), "'status' must been from type bool" return status def _UpperCamelCase ( lowercase__ ): assert isinstance(lowercase__ , lowercase__ ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N __SCREAMING_SNAKE_CASE : Optional[Any] = list(range(2 , n + 1 ) ) __SCREAMING_SNAKE_CASE : Optional[int] = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(lowercase__ ) ): for j in range(i + 1 , len(lowercase__ ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): __SCREAMING_SNAKE_CASE : List[Any] = 0 # filters actual prime numbers. __SCREAMING_SNAKE_CASE : Tuple = [x for x in begin_list if x != 0] # precondition assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type list" return ans def _UpperCamelCase ( lowercase__ ): assert isinstance(lowercase__ , lowercase__ ) and (n > 2), "'N' must been an int and > 2" __SCREAMING_SNAKE_CASE : str = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(lowercase__ ): ans.append(lowercase__ ) # precondition assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type list" return ans def _UpperCamelCase ( lowercase__ ): assert isinstance(lowercase__ , lowercase__ ) and number >= 0, "'number' must been an int and >= 0" __SCREAMING_SNAKE_CASE : str = [] # this list will be returns of the function. # potential prime number factors. __SCREAMING_SNAKE_CASE : Union[str, Any] = 2 __SCREAMING_SNAKE_CASE : Any = number if number == 0 or number == 1: ans.append(lowercase__ ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(lowercase__ ): while quotient != 1: if is_prime(lowercase__ ) and (quotient % factor == 0): ans.append(lowercase__ ) quotient /= factor else: factor += 1 else: ans.append(lowercase__ ) # precondition assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type list" return ans def _UpperCamelCase ( lowercase__ ): assert isinstance(lowercase__ , lowercase__ ) and ( number >= 0 ), "'number' bust been an int and >= 0" __SCREAMING_SNAKE_CASE : Optional[Any] = 0 # prime factorization of 'number' __SCREAMING_SNAKE_CASE : Optional[Any] = prime_factorization(lowercase__ ) __SCREAMING_SNAKE_CASE : Tuple = max(lowercase__ ) # precondition assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type int" return ans def _UpperCamelCase ( lowercase__ ): assert isinstance(lowercase__ , lowercase__ ) and ( number >= 0 ), "'number' bust been an int and >= 0" __SCREAMING_SNAKE_CASE : int = 0 # prime factorization of 'number' __SCREAMING_SNAKE_CASE : List[Any] = prime_factorization(lowercase__ ) __SCREAMING_SNAKE_CASE : Union[str, Any] = min(lowercase__ ) # precondition assert isinstance(lowercase__ , lowercase__ ), "'ans' must been from type int" return ans def _UpperCamelCase ( lowercase__ ): assert isinstance(lowercase__ , lowercase__ ), "'number' must been an int" assert isinstance(number % 2 == 0 , lowercase__ ), "compare bust been from type bool" return number % 2 == 0 def _UpperCamelCase ( lowercase__ ): assert isinstance(lowercase__ , lowercase__ ), "'number' must been an int" assert isinstance(number % 2 != 0 , lowercase__ ), "compare bust been from type bool" return number % 2 != 0 def _UpperCamelCase ( lowercase__ ): assert ( isinstance(lowercase__ , lowercase__ ) and (number > 2) and is_even(lowercase__ ) ), "'number' must been an int, even and > 2" __SCREAMING_SNAKE_CASE : Tuple = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' __SCREAMING_SNAKE_CASE : Optional[Any] = get_prime_numbers(lowercase__ ) __SCREAMING_SNAKE_CASE : Tuple = len(lowercase__ ) # run variable for while-loops. __SCREAMING_SNAKE_CASE : List[str] = 0 __SCREAMING_SNAKE_CASE : Tuple = None # exit variable. for break up the loops __SCREAMING_SNAKE_CASE : Any = True while i < len_pn and loop: __SCREAMING_SNAKE_CASE : Tuple = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: __SCREAMING_SNAKE_CASE : List[str] = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(lowercase__ , lowercase__ ) and (len(lowercase__ ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def _UpperCamelCase ( lowercase__ , lowercase__ ): assert ( isinstance(lowercase__ , lowercase__ ) and isinstance(lowercase__ , lowercase__ ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." __SCREAMING_SNAKE_CASE : Any = 0 while numbera != 0: __SCREAMING_SNAKE_CASE : List[Any] = numbera % numbera __SCREAMING_SNAKE_CASE : Optional[int] = numbera __SCREAMING_SNAKE_CASE : Optional[int] = rest # precondition assert isinstance(lowercase__ , lowercase__ ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def _UpperCamelCase ( lowercase__ , lowercase__ ): assert ( isinstance(lowercase__ , lowercase__ ) and isinstance(lowercase__ , lowercase__ ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." __SCREAMING_SNAKE_CASE : Dict = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' __SCREAMING_SNAKE_CASE : Any = prime_factorization(lowercase__ ) __SCREAMING_SNAKE_CASE : Dict = prime_factorization(lowercase__ ) elif numbera == 1 or numbera == 1: __SCREAMING_SNAKE_CASE : List[str] = [] __SCREAMING_SNAKE_CASE : List[Any] = [] __SCREAMING_SNAKE_CASE : Optional[Any] = max(lowercase__ , lowercase__ ) __SCREAMING_SNAKE_CASE : int = 0 __SCREAMING_SNAKE_CASE : List[str] = 0 __SCREAMING_SNAKE_CASE : Any = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: __SCREAMING_SNAKE_CASE : int = prime_fac_a.count(lowercase__ ) __SCREAMING_SNAKE_CASE : str = prime_fac_a.count(lowercase__ ) for _ in range(max(lowercase__ , lowercase__ ) ): ans *= n else: __SCREAMING_SNAKE_CASE : str = prime_fac_a.count(lowercase__ ) for _ in range(lowercase__ ): ans *= n done.append(lowercase__ ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: __SCREAMING_SNAKE_CASE : str = prime_fac_a.count(lowercase__ ) for _ in range(lowercase__ ): ans *= n done.append(lowercase__ ) # precondition assert isinstance(lowercase__ , lowercase__ ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def _UpperCamelCase ( lowercase__ ): assert isinstance(lowercase__ , lowercase__ ) and (n >= 0), "'number' must been a positive int" __SCREAMING_SNAKE_CASE : int = 0 __SCREAMING_SNAKE_CASE : Union[str, Any] = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(lowercase__ ): ans += 1 # precondition assert isinstance(lowercase__ , lowercase__ ) and is_prime( lowercase__ ), "'ans' must been a prime number and from type int" return ans def _UpperCamelCase ( lowercase__ , lowercase__ ): assert ( is_prime(lowercase__ ) and is_prime(lowercase__ ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" __SCREAMING_SNAKE_CASE : Optional[Any] = p_number_a + 1 # jump to the next number __SCREAMING_SNAKE_CASE : Optional[Any] = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(lowercase__ ): number += 1 while number < p_number_a: ans.append(lowercase__ ) number += 1 # fetch the next prime number. while not is_prime(lowercase__ ): number += 1 # precondition assert ( isinstance(lowercase__ , lowercase__ ) and ans[0] != p_number_a and ans[len(lowercase__ ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def _UpperCamelCase ( lowercase__ ): assert isinstance(lowercase__ , lowercase__ ) and (n >= 1), "'n' must been int and >= 1" __SCREAMING_SNAKE_CASE : List[str] = [] # will be returned. for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(lowercase__ ) # precondition assert ans[0] == 1 and ans[len(lowercase__ ) - 1] == n, "Error in function getDivisiors(...)" return ans def _UpperCamelCase ( lowercase__ ): assert isinstance(lowercase__ , lowercase__ ) and ( number > 1 ), "'number' must been an int and >= 1" __SCREAMING_SNAKE_CASE : str = get_divisors(lowercase__ ) # precondition assert ( isinstance(lowercase__ , lowercase__ ) and (divisors[0] == 1) and (divisors[len(lowercase__ ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def _UpperCamelCase ( lowercase__ , lowercase__ ): assert ( isinstance(lowercase__ , lowercase__ ) and isinstance(lowercase__ , lowercase__ ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. __SCREAMING_SNAKE_CASE : Optional[Any] = gcd(abs(lowercase__ ) , abs(lowercase__ ) ) # precondition assert ( isinstance(lowercase__ , lowercase__ ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def _UpperCamelCase ( lowercase__ ): assert isinstance(lowercase__ , lowercase__ ) and (n >= 0), "'n' must been a int and >= 0" __SCREAMING_SNAKE_CASE : Any = 1 # this will be return. for factor in range(1 , n + 1 ): ans *= factor return ans def _UpperCamelCase ( lowercase__ ): assert isinstance(lowercase__ , lowercase__ ) and (n >= 0), "'n' must been an int and >= 0" __SCREAMING_SNAKE_CASE : Union[str, Any] = 0 __SCREAMING_SNAKE_CASE : Union[str, Any] = 1 __SCREAMING_SNAKE_CASE : Optional[int] = 1 # this will be return for _ in range(n - 1 ): __SCREAMING_SNAKE_CASE : Optional[Any] = ans ans += fiba __SCREAMING_SNAKE_CASE : Optional[Any] = tmp return ans
9
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) lowercase : Dict = { 'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'], 'processing_speech_to_text': ['Speech2TextProcessor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[Any] = ['Speech2TextTokenizer'] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[str] = ['Speech2TextFeatureExtractor'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Dict = [ 'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFSpeech2TextForConditionalGeneration', 'TFSpeech2TextModel', 'TFSpeech2TextPreTrainedModel', ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Any = [ 'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Speech2TextForConditionalGeneration', 'Speech2TextModel', 'Speech2TextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys lowercase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
3
0
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable __A = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ["DPTFeatureExtractor"] __A = ["DPTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "DPT_PRETRAINED_MODEL_ARCHIVE_LIST", "DPTForDepthEstimation", "DPTForSemanticSegmentation", "DPTModel", "DPTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
10
'''simple docstring''' import os import sys import unittest lowercase : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) lowercase : Any = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py') lowercase : Optional[int] = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py') class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Any = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : List[Any] = {'''BertModelTest''': '''BertModelTester'''} A : int = { '''BlipModelTest''': '''BlipModelTester''', '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''', '''BlipTextModelTest''': '''BlipTextModelTester''', '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''', '''BlipVQAModelTest''': '''BlipVQAModelTester''', '''BlipVisionModelTest''': '''BlipVisionModelTester''', } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : List[str] = { '''BertForMaskedLM''': ['''BertModelTest'''], '''BertForMultipleChoice''': ['''BertModelTest'''], '''BertForNextSentencePrediction''': ['''BertModelTest'''], '''BertForPreTraining''': ['''BertModelTest'''], '''BertForQuestionAnswering''': ['''BertModelTest'''], '''BertForSequenceClassification''': ['''BertModelTest'''], '''BertForTokenClassification''': ['''BertModelTest'''], '''BertLMHeadModel''': ['''BertModelTest'''], '''BertModel''': ['''BertModelTest'''], } A : Union[str, Any] = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''], '''BlipModel''': ['''BlipModelTest'''], '''BlipTextModel''': ['''BlipTextModelTest'''], '''BlipVisionModel''': ['''BlipVisionModelTest'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : int = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Dict = { '''BertForMaskedLM''': ['''BertModelTester'''], '''BertForMultipleChoice''': ['''BertModelTester'''], '''BertForNextSentencePrediction''': ['''BertModelTester'''], '''BertForPreTraining''': ['''BertModelTester'''], '''BertForQuestionAnswering''': ['''BertModelTester'''], '''BertForSequenceClassification''': ['''BertModelTester'''], '''BertForTokenClassification''': ['''BertModelTester'''], '''BertLMHeadModel''': ['''BertModelTester'''], '''BertModel''': ['''BertModelTester'''], } A : str = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''], '''BlipModel''': ['''BlipModelTester'''], '''BlipTextModel''': ['''BlipTextModelTester'''], '''BlipVisionModel''': ['''BlipVisionModelTester'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
3
0
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def _UpperCAmelCase (UpperCamelCase__ : Union[str, Any] ): _A , _A : Any = image.size _A , _A : str = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 _A : List[str] = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) _A : Any = np.array(UpperCamelCase__ ).astype(np.floataa ) / 2_55.0 _A : Optional[Any] = image[None].transpose(0 , 3 , 1 , 2 ) _A : Union[str, Any] = torch.from_numpy(UpperCamelCase__ ) return 2.0 * image - 1.0 class lowerCAmelCase__ ( a): '''simple docstring''' def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Optional[int]: super().__init__() self.register_modules(vqvae=__lowerCamelCase , unet=__lowerCamelCase , scheduler=__lowerCamelCase) @torch.no_grad() def __call__( self , __lowerCamelCase = None , __lowerCamelCase = 1 , __lowerCamelCase = 1_0_0 , __lowerCamelCase = 0.0 , __lowerCamelCase = None , __lowerCamelCase = "pil" , __lowerCamelCase = True , ) -> Union[Tuple, ImagePipelineOutput]: if isinstance(__lowerCamelCase , PIL.Image.Image): _A : Tuple = 1 elif isinstance(__lowerCamelCase , torch.Tensor): _A : Union[str, Any] = image.shape[0] else: raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__lowerCamelCase)}") if isinstance(__lowerCamelCase , PIL.Image.Image): _A : Union[str, Any] = preprocess(__lowerCamelCase) _A , _A : Union[str, Any] = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image _A : Optional[Any] = (batch_size, self.unet.config.in_channels // 2, height, width) _A : str = next(self.unet.parameters()).dtype _A : Union[str, Any] = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=__lowerCamelCase) _A : List[Any] = image.to(device=self.device , dtype=__lowerCamelCase) # set timesteps and move to the correct device self.scheduler.set_timesteps(__lowerCamelCase , device=self.device) _A : Any = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler _A : List[str] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _A : str = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) _A : Optional[int] = {} if accepts_eta: _A : List[Any] = eta for t in self.progress_bar(__lowerCamelCase): # concat latents and low resolution image in the channel dimension. _A : List[Any] = torch.cat([latents, image] , dim=1) _A : str = self.scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase) # predict the noise residual _A : Any = self.unet(__lowerCamelCase , __lowerCamelCase).sample # compute the previous noisy sample x_t -> x_t-1 _A : Optional[int] = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase).prev_sample # decode the image latents with the VQVAE _A : Union[str, Any] = self.vqvae.decode(__lowerCamelCase).sample _A : Dict = torch.clamp(__lowerCamelCase , -1.0 , 1.0) _A : Tuple = image / 2 + 0.5 _A : int = image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": _A : Optional[int] = self.numpy_to_pil(__lowerCamelCase) if not return_dict: return (image,) return ImagePipelineOutput(images=__lowerCamelCase)
11
'''simple docstring''' from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class A ( __snake_case ): __magic_name__ = DistilBertTokenizer __magic_name__ = DistilBertTokenizerFast __magic_name__ = True @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : List[Any] = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' ) A : Dict = tokenizer.encode('''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE ) A : List[str] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE ) A : Optional[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE ) A : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
3
0
import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} UpperCAmelCase_ = { 'vocab_file': { 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json', }, 'merges_file': { 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt', }, 'tokenizer_file': { 'Salesforce/codegen-350M-mono': ( 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json' ), }, } UpperCAmelCase_ = { 'Salesforce/codegen-350M-mono': 2_048, } class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : Union[str, Any] = VOCAB_FILES_NAMES UpperCAmelCase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ : List[Any] = ['input_ids', 'attention_mask'] UpperCAmelCase__ : Dict = CodeGenTokenizer def __init__( self: Tuple , UpperCamelCase_: Optional[Any]=None , UpperCamelCase_: Tuple=None , UpperCamelCase_: Tuple=None , UpperCamelCase_: Optional[int]="<|endoftext|>" , UpperCamelCase_: Union[str, Any]="<|endoftext|>" , UpperCamelCase_: Union[str, Any]="<|endoftext|>" , UpperCamelCase_: Any=False , **UpperCamelCase_: Tuple , ): super().__init__( UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , ) if kwargs.pop("""add_bos_token""" , UpperCamelCase_ ): __lowerCamelCase = kwargs.pop("""name_or_path""" , """""" ) raise ValueError( """Currenty GPT2's fast tokenizer does NOT support adding a BOS token.""" """Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n""" F'`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n' F'`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n' """This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.""" """ so that the fast tokenizer works correctly.""" ) __lowerCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , UpperCamelCase_ ) != add_prefix_space: __lowerCamelCase = getattr(UpperCamelCase_ , pre_tok_state.pop("""type""" ) ) __lowerCamelCase = add_prefix_space __lowerCamelCase = pre_tok_class(**UpperCamelCase_ ) __lowerCamelCase = add_prefix_space def lowerCAmelCase__ ( self: Union[str, Any] , *UpperCamelCase_: Union[str, Any] , **UpperCamelCase_: List[str] ): __lowerCamelCase = kwargs.get("""is_split_into_words""" , UpperCamelCase_ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: Union[str, Any] , *UpperCamelCase_: Any , **UpperCamelCase_: List[str] ): __lowerCamelCase = kwargs.get("""is_split_into_words""" , UpperCamelCase_ ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ ) def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ): __lowerCamelCase = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ ) return tuple(UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , UpperCamelCase_: bool = False , UpperCamelCase_: bool = None , UpperCamelCase_: Optional[List[str]] = None , **UpperCamelCase_: Union[str, Any] , ): __lowerCamelCase = super().decode( token_ids=UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , **UpperCamelCase_ , ) if truncate_before_pattern is not None and len(UpperCamelCase_ ) > 0: __lowerCamelCase = self.truncate(UpperCamelCase_ , UpperCamelCase_ ) return decoded_text def lowerCAmelCase__ ( self: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: Any ): def find_re(UpperCamelCase_: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any] ): __lowerCamelCase = pattern.search(UpperCamelCase_ , UpperCamelCase_ ) return m.start() if m else -1 __lowerCamelCase = [re.compile(UpperCamelCase_ , re.MULTILINE ) for pattern in truncate_before_pattern] __lowerCamelCase = list(re.finditer("""^print""" , UpperCamelCase_ , re.MULTILINE ) ) if len(UpperCamelCase_ ) > 1: __lowerCamelCase = completion[: prints[1].start()] __lowerCamelCase = list(re.finditer("""^def""" , UpperCamelCase_ , re.MULTILINE ) ) if len(UpperCamelCase_ ) > 1: __lowerCamelCase = completion[: defs[1].start()] __lowerCamelCase = 0 __lowerCamelCase = [ pos for pos in [find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for terminal in terminals] if pos != -1 ] if len(UpperCamelCase_ ) > 0: return completion[: min(UpperCamelCase_ )] else: return completion
12
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase : Optional[int] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''input_features''', '''attention_mask'''] def __init__( self , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=16000 , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" super().__init__(feature_size=SCREAMING_SNAKE_CASE , sampling_rate=SCREAMING_SNAKE_CASE , padding_value=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = num_mel_bins A : Tuple = do_ceptral_normalize A : Dict = normalize_means A : List[Any] = normalize_vars A : List[str] = True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : List[Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers A : Any = torch.from_numpy(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) A : Any = ta_kaldi.fbank(SCREAMING_SNAKE_CASE , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 0.0 , ) -> np.ndarray: """simple docstring""" if normalize_means: A : Dict = x[:input_length].mean(axis=0 ) A : Optional[Any] = np.subtract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if normalize_vars: A : str = x[:input_length].std(axis=0 ) A : int = np.divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if input_length < x.shape[0]: A : List[str] = padding_value # make sure array is in float32 A : Tuple = x.astype(np.floataa ) return x def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[np.ndarray]: """simple docstring""" A : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' F' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) A : List[Any] = isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}' ) A : Tuple = is_batched_numpy or ( isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ): A : Union[str, Any] = np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A : Any = [raw_speech] # extract fbank features A : List[str] = [self._extract_fbank_features(SCREAMING_SNAKE_CASE ) for waveform in raw_speech] # convert into correct format for padding A : str = BatchFeature({'''input_features''': features} ) A : Union[str, Any] = self.pad( SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) # make sure list is in array format A : List[str] = padded_inputs.get('''input_features''' ) if isinstance(input_features[0] , SCREAMING_SNAKE_CASE ): A : str = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features] A : Union[str, Any] = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: A : Dict = ( np.array(SCREAMING_SNAKE_CASE , dtype=np.intaa ) if self._get_padding_strategies(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE ) is not PaddingStrategy.DO_NOT_PAD else None ) A : List[Any] = self.normalize( padded_inputs['''input_features'''] , attention_mask=SCREAMING_SNAKE_CASE ) if return_tensors is not None: A : int = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE ) return padded_inputs
3
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) lowerCAmelCase : str = { """configuration_speecht5""": [ """SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP""", """SpeechT5Config""", """SpeechT5HifiGanConfig""", ], """feature_extraction_speecht5""": ["""SpeechT5FeatureExtractor"""], """processing_speecht5""": ["""SpeechT5Processor"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[int] = ["""SpeechT5Tokenizer"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : int = [ """SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST""", """SpeechT5ForSpeechToText""", """SpeechT5ForSpeechToSpeech""", """SpeechT5ForTextToSpeech""", """SpeechT5Model""", """SpeechT5PreTrainedModel""", """SpeechT5HifiGan""", ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
13
'''simple docstring''' import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 lowercase : str = get_tests_dir('fixtures/dummy_feature_extractor_config.json') lowercase : str = get_tests_dir('fixtures/vocab.json') lowercase : int = get_tests_dir('fixtures') class A ( unittest.TestCase ): __magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = 0 def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : List[Any] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : Union[str, Any] = WavaVecaConfig() A : List[str] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) # save in new folder model_config.save_pretrained(SCREAMING_SNAKE_CASE ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) ) A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : Dict = WavaVecaFeatureExtractor() A : List[str] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(SCREAMING_SNAKE_CASE ) # drop `processor_class` in tokenizer with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f: A : Dict = json.load(SCREAMING_SNAKE_CASE ) config_dict.pop('''processor_class''' ) with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE ) ) A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : List[Any] = WavaVecaFeatureExtractor() A : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(SCREAMING_SNAKE_CASE ) # drop `processor_class` in feature extractor with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f: A : str = json.load(SCREAMING_SNAKE_CASE ) config_dict.pop('''processor_class''' ) with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE ) ) A : str = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : str = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' ) model_config.save_pretrained(SCREAMING_SNAKE_CASE ) # copy relevant files copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) ) # create emtpy sample processor with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write('''{}''' ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" with self.assertRaises(SCREAMING_SNAKE_CASE ): A : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(SCREAMING_SNAKE_CASE ): A : Union[str, Any] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) A : List[str] = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) A : Tuple = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version A : List[str] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE ) A : int = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(SCREAMING_SNAKE_CASE ): AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Now that the config is registered, it can be used as any other config with the auto-API A : List[Any] = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: A : Tuple = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : Optional[int] = CustomTokenizer(SCREAMING_SNAKE_CASE ) A : Any = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" class A ( __snake_case ): __magic_name__ = False class A ( __snake_case ): __magic_name__ = False class A ( __snake_case ): __magic_name__ = '''AutoFeatureExtractor''' __magic_name__ = '''AutoTokenizer''' __magic_name__ = False try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # If remote code is not set, the default is to use local classes. A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. A : Optional[int] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. A : Tuple = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : int = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Optional[int] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' ) self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' ) @is_staging_test class A ( unittest.TestCase ): __magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def __lowerCAmelCase ( cls ) -> Dict: """simple docstring""" A : Optional[int] = TOKEN HfFolder.save_token(SCREAMING_SNAKE_CASE ) @classmethod def __lowerCAmelCase ( cls ) -> Any: """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' ) except HTTPError: pass def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Union[str, Any] = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(SCREAMING_SNAKE_CASE , '''test-processor''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token ) A : int = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Tuple = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(SCREAMING_SNAKE_CASE , '''test-processor-org''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token , organization='''valid_org''' , ) A : int = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() A : Any = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: A : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : str = CustomTokenizer(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(F'{USER}/test-dynamic-processor' , token=self._token ) A : List[str] = Repository(SCREAMING_SNAKE_CASE , clone_from=F'{USER}/test-dynamic-processor' , token=self._token ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { '''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''', '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) ) as f: A : Dict = json.load(SCREAMING_SNAKE_CASE ) self.assertDictEqual( tokenizer_config['''auto_map'''] , { '''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None], '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_feature_extraction.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_tokenization.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_processing.py''' ) ) ) repo.push_to_hub() A : Optional[int] = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' , trust_remote_code=SCREAMING_SNAKE_CASE ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
3
0
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Optional[int] , *UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=None , **UpperCAmelCase__ : Optional[Any]) ->Any: '''simple docstring''' super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__) A__ = eval_examples A__ = post_process_function def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Optional[Dataset] = None , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[List[str]] = None , UpperCAmelCase__ : str = "eval" , **UpperCAmelCase__ : List[str] , ) ->Dict[str, float]: '''simple docstring''' A__ = gen_kwargs.copy() A__ = ( gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''') is not None else self.args.generation_max_length ) A__ = ( gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''') is not None else self.args.generation_num_beams ) A__ = gen_kwargs A__ = self.eval_dataset if eval_dataset is None else eval_dataset A__ = self.get_eval_dataloader(UpperCAmelCase__) A__ = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. A__ = self.compute_metrics A__ = None A__ = time.time() A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: A__ = eval_loop( UpperCAmelCase__ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase__ , metric_key_prefix=UpperCAmelCase__ , ) finally: A__ = compute_metrics A__ = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( UpperCAmelCase__ , UpperCAmelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , )) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default A__ = self.post_process_function(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) A__ = self.compute_metrics(UpperCAmelCase__) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(f"""{metric_key_prefix}_"""): A__ = metrics.pop(UpperCAmelCase__) metrics.update(output.metrics) else: A__ = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(UpperCAmelCase__) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report()) A__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCAmelCase__) return metrics def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : str = "test" , **UpperCAmelCase__ : int) ->int: '''simple docstring''' A__ = gen_kwargs.copy() A__ = self.get_test_dataloader(UpperCAmelCase__) # Temporarily disable metric computation, we will do it in the loop here. A__ = self.compute_metrics A__ = None A__ = time.time() A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: A__ = eval_loop( UpperCAmelCase__ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase__ , metric_key_prefix=UpperCAmelCase__ , ) finally: A__ = compute_metrics A__ = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( UpperCAmelCase__ , UpperCAmelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , )) if self.post_process_function is None or self.compute_metrics is None: return output A__ = self.post_process_function(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , '''predict''') A__ = self.compute_metrics(UpperCAmelCase__) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(f"""{metric_key_prefix}_"""): A__ = metrics.pop(UpperCAmelCase__) metrics.update(output.metrics) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCAmelCase__)
14
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: lowercase : Optional[Any] = None lowercase : Tuple = logging.get_logger(__name__) lowercase : Dict = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'} lowercase : Tuple = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, 'tokenizer_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json', }, } lowercase : List[str] = { 'google/rembert': 2_56, } lowercase : Dict = '▁' class A ( __snake_case ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = RemBertTokenizer def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[MASK]" , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" A : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token super().__init__( SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) A : List[Any] = do_lower_case A : str = remove_space A : int = keep_accents A : Union[str, Any] = vocab_file A : List[Any] = False if not self.vocab_file else True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : List[Any] = [self.sep_token_id] A : Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : Tuple = [self.sep_token_id] A : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error('''Vocabulary path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE ) ) return A : Any = os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
3
0
SCREAMING_SNAKE_CASE :int = { 0: '0', 1: '1', 2: '2', 3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: 'a', 11: 'b', 12: 'c', 13: 'd', 14: 'e', 15: 'f', } def UpperCAmelCase ( a_ ) -> str: """simple docstring""" assert type(a_ ) in (int, float) and decimal == int(a_ ) __A = int(a_ ) __A = "" __A = False if decimal < 0: __A = True decimal *= -1 while decimal > 0: __A , __A = divmod(a_ , 1_6 ) __A = values[remainder] + hexadecimal __A = "0x" + hexadecimal if negative: __A = "-" + hexadecimal return hexadecimal if __name__ == "__main__": import doctest doctest.testmod()
15
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowercase : Optional[Any] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''pixel_values'''] def __init__( self , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 1 / 255 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) A : str = size if size is not None else {'''shortest_edge''': 384} A : Tuple = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : str = do_resize A : List[Any] = size # Default value set here for backwards compatibility where the value in config is None A : List[Any] = crop_pct if crop_pct is not None else 224 / 256 A : Optional[int] = resample A : Union[str, Any] = do_rescale A : List[str] = rescale_factor A : Union[str, Any] = do_normalize A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : str = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) if "shortest_edge" not in size: raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' ) A : Any = size['''shortest_edge'''] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct A : Dict = int(shortest_edge / crop_pct ) A : str = get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : int = resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) else: # warping (no cropping) when evaluated at 384 or larger return resize( SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE , ) -> PIL.Image.Image: """simple docstring""" A : int = do_resize if do_resize is not None else self.do_resize A : Tuple = crop_pct if crop_pct is not None else self.crop_pct A : Optional[Any] = resample if resample is not None else self.resample A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale A : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor A : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean A : List[str] = image_std if image_std is not None else self.image_std A : Union[str, Any] = size if size is not None else self.size A : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : Any = make_list_of_images(SCREAMING_SNAKE_CASE ) if not valid_images(SCREAMING_SNAKE_CASE ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError('''crop_pct must be specified if size < 384.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. A : Optional[int] = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images] if do_resize: A : Any = [self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , crop_pct=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: A : str = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images] if do_normalize: A : Dict = [self.normalize(image=SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE ) for image in images] A : Any = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images] A : Optional[int] = {'''pixel_values''': images} return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
3
0
"""simple docstring""" from unittest.mock import Mock, patch from file_transfer.send_file import send_file @patch('''socket.socket''' ) @patch('''builtins.open''' ) def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> int: # ===== initialization ===== lowercase__ : List[str] = Mock() lowercase__ : Any = conn, Mock() lowercase__ : Optional[int] = iter([1, None] ) lowercase__ : Union[str, Any] = lambda __lowerCamelCase : next(__lowerCamelCase ) # ===== invoke ===== send_file(filename='''mytext.txt''' , testing=__lowerCamelCase ) # ===== ensurance ===== sock.assert_called_once() sock.return_value.bind.assert_called_once() sock.return_value.listen.assert_called_once() sock.return_value.accept.assert_called_once() conn.recv.assert_called_once() file.return_value.__enter__.assert_called_once() file.return_value.__enter__.return_value.read.assert_called() conn.send.assert_called_once() conn.close.assert_called_once() sock.return_value.shutdown.assert_called_once() sock.return_value.close.assert_called_once()
16
'''simple docstring''' import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(SCREAMING_SNAKE_CASE ): A : int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[str] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> int: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: with self.subTest(SCREAMING_SNAKE_CASE ): A : Any = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Any = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: A : Optional[int] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = FlaxBertModel.from_pretrained(SCREAMING_SNAKE_CASE ) A : Optional[Any] = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() @slow def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: A : List[str] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE ) A : int = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''bert-base is not a local folder and is not a valid model identifier''' ): A : List[Any] = FlaxAutoModel.from_pretrained('''bert-base''' ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): A : Optional[int] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE , revision='''aaaaaa''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ): A : List[str] = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" with self.assertRaisesRegex(SCREAMING_SNAKE_CASE , '''Use `from_pt=True` to load this model''' ): A : Any = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
3
0
"""simple docstring""" import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: _a = False _a = logging.get_logger(__name__) _a = 'ybelkada/fonts' def _A ( ) -> int: '''simple docstring''' if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( F"""You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use """ "Pix2StructImageProcessor. Please upgrade torch.") def _A ( UpperCamelCase_ : Optional[int], UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : Optional[Any]) -> int: '''simple docstring''' requires_backends(UpperCamelCase_, ["torch"]) _check_torch_version() __lowercase = image_tensor.unsqueeze(0) __lowercase = torch.nn.functional.unfold(UpperCamelCase_, (patch_height, patch_width), stride=(patch_height, patch_width)) __lowercase = patches.reshape(image_tensor.size(0), image_tensor.size(1), UpperCamelCase_, UpperCamelCase_, -1) __lowercase = patches.permute(0, 4, 2, 3, 1).reshape( image_tensor.size(2) // patch_height, image_tensor.size(3) // patch_width, image_tensor.size(1) * patch_height * patch_width, ) return patches.unsqueeze(0) def _A ( UpperCamelCase_ : str, UpperCamelCase_ : int = 36, UpperCamelCase_ : str = "black", UpperCamelCase_ : str = "white", UpperCamelCase_ : int = 5, UpperCamelCase_ : int = 5, UpperCamelCase_ : int = 5, UpperCamelCase_ : int = 5, UpperCamelCase_ : Optional[bytes] = None, UpperCamelCase_ : Optional[str] = None, ) -> Image.Image: '''simple docstring''' requires_backends(UpperCamelCase_, "vision") # Add new lines so that each line is no more than 80 characters. __lowercase = textwrap.TextWrapper(width=80) __lowercase = wrapper.wrap(text=UpperCamelCase_) __lowercase = "\n".join(UpperCamelCase_) if font_bytes is not None and font_path is None: __lowercase = io.BytesIO(UpperCamelCase_) elif font_path is not None: __lowercase = font_path else: __lowercase = hf_hub_download(UpperCamelCase_, "Arial.TTF") __lowercase = ImageFont.truetype(UpperCamelCase_, encoding="UTF-8", size=UpperCamelCase_) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. __lowercase = ImageDraw.Draw(Image.new("RGB", (1, 1), UpperCamelCase_)) __lowercase ,__lowercase ,__lowercase ,__lowercase = temp_draw.textbbox((0, 0), UpperCamelCase_, UpperCamelCase_) # Create the actual image with a bit of padding around the text. __lowercase = text_width + left_padding + right_padding __lowercase = text_height + top_padding + bottom_padding __lowercase = Image.new("RGB", (image_width, image_height), UpperCamelCase_) __lowercase = ImageDraw.Draw(UpperCamelCase_) draw.text(xy=(left_padding, top_padding), text=UpperCamelCase_, fill=UpperCamelCase_, font=UpperCamelCase_) return image def _A ( UpperCamelCase_ : np.ndarray, UpperCamelCase_ : str, **UpperCamelCase_ : Union[str, Any]) -> List[str]: '''simple docstring''' requires_backends(UpperCamelCase_, "vision") # Convert to PIL image if necessary __lowercase = to_pil_image(UpperCamelCase_) __lowercase = render_text(UpperCamelCase_, **UpperCamelCase_) __lowercase = max(header_image.width, image.width) __lowercase = int(image.height * (new_width / image.width)) __lowercase = int(header_image.height * (new_width / header_image.width)) __lowercase = Image.new("RGB", (new_width, new_height + new_header_height), "white") new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0)) new_image.paste(image.resize((new_width, new_height)), (0, new_header_height)) # Convert back to the original framework if necessary __lowercase = to_numpy_array(UpperCamelCase_) if infer_channel_dimension_format(UpperCamelCase_) == ChannelDimension.LAST: __lowercase = to_channel_dimension_format(UpperCamelCase_, ChannelDimension.LAST) return new_image class _lowerCAmelCase ( lowercase ): """simple docstring""" __UpperCAmelCase : List[Any] = ["flattened_patches"] def __init__( self : List[str], UpperCAmelCase__ : bool = True, UpperCAmelCase__ : bool = True, UpperCAmelCase__ : Dict[str, int] = None, UpperCAmelCase__ : int = 2_0_4_8, UpperCAmelCase__ : bool = False, **UpperCAmelCase__ : Dict, ): super().__init__(**UpperCAmelCase__ ) __lowercase = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6} __lowercase = do_normalize __lowercase = do_convert_rgb __lowercase = max_patches __lowercase = is_vqa def _lowercase ( self : Optional[int], UpperCAmelCase__ : np.ndarray, UpperCAmelCase__ : int, UpperCAmelCase__ : dict, **UpperCAmelCase__ : Dict ): requires_backends(self.extract_flattened_patches, "torch" ) _check_torch_version() # convert to torch __lowercase = to_channel_dimension_format(UpperCAmelCase__, ChannelDimension.FIRST ) __lowercase = torch.from_numpy(UpperCAmelCase__ ) __lowercase ,__lowercase = patch_size["height"], patch_size["width"] __lowercase ,__lowercase = get_image_size(UpperCAmelCase__ ) # maximize scale s.t. __lowercase = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) ) __lowercase = max(min(math.floor(scale * image_height / patch_height ), UpperCAmelCase__ ), 1 ) __lowercase = max(min(math.floor(scale * image_width / patch_width ), UpperCAmelCase__ ), 1 ) __lowercase = max(num_feasible_rows * patch_height, 1 ) __lowercase = max(num_feasible_cols * patch_width, 1 ) __lowercase = torch.nn.functional.interpolate( image.unsqueeze(0 ), size=(resized_height, resized_width), mode="bilinear", align_corners=UpperCAmelCase__, antialias=UpperCAmelCase__, ).squeeze(0 ) # [1, rows, columns, patch_height * patch_width * image_channels] __lowercase = torch_extract_patches(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = patches.shape __lowercase = patches_shape[1] __lowercase = patches_shape[2] __lowercase = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] __lowercase = patches.reshape([rows * columns, depth] ) # [rows * columns, 1] __lowercase = torch.arange(UpperCAmelCase__ ).reshape([rows, 1] ).repeat(1, UpperCAmelCase__ ).reshape([rows * columns, 1] ) __lowercase = torch.arange(UpperCAmelCase__ ).reshape([1, columns] ).repeat(UpperCAmelCase__, 1 ).reshape([rows * columns, 1] ) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. # [rows * columns, 1] __lowercase = row_ids.to(torch.floataa ) __lowercase = col_ids.to(torch.floataa ) # [rows * columns, 2 + patch_height * patch_width * image_channels] __lowercase = torch.cat([row_ids, col_ids, patches], -1 ) # [max_patches, 2 + patch_height * patch_width * image_channels] __lowercase = torch.nn.functional.pad(UpperCAmelCase__, [0, 0, 0, max_patches - (rows * columns)] ).float() __lowercase = to_numpy_array(UpperCAmelCase__ ) return result def _lowercase ( self : List[Any], UpperCAmelCase__ : np.ndarray, UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None, **UpperCAmelCase__ : Any ): if image.dtype == np.uinta: __lowercase = image.astype(np.floataa ) # take mean across the whole `image` __lowercase = np.mean(UpperCAmelCase__ ) __lowercase = np.std(UpperCAmelCase__ ) __lowercase = max(UpperCAmelCase__, 1.0 / math.sqrt(np.prod(image.shape ) ) ) return normalize(UpperCAmelCase__, mean=UpperCAmelCase__, std=UpperCAmelCase__, **UpperCAmelCase__ ) def _lowercase ( self : Dict, UpperCAmelCase__ : ImageInput, UpperCAmelCase__ : Optional[str] = None, UpperCAmelCase__ : bool = None, UpperCAmelCase__ : Optional[bool] = None, UpperCAmelCase__ : Optional[int] = None, UpperCAmelCase__ : Optional[Dict[str, int]] = None, UpperCAmelCase__ : Optional[Union[str, TensorType]] = None, UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST, **UpperCAmelCase__ : Any, ): __lowercase = do_normalize if do_normalize is not None else self.do_normalize __lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowercase = patch_size if patch_size is not None else self.patch_size __lowercase = max_patches if max_patches is not None else self.max_patches __lowercase = self.is_vqa if kwargs.get("data_format", UpperCAmelCase__ ) is not None: raise ValueError("data_format is not an accepted input as the outputs are " ) __lowercase = make_list_of_images(UpperCAmelCase__ ) if not valid_images(UpperCAmelCase__ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowercase = [convert_to_rgb(UpperCAmelCase__ ) for image in images] # All transformations expect numpy arrays. __lowercase = [to_numpy_array(UpperCAmelCase__ ) for image in images] if is_vqa: if header_text is None: raise ValueError("A header text must be provided for VQA models." ) __lowercase = kwargs.pop("font_bytes", UpperCAmelCase__ ) __lowercase = kwargs.pop("font_path", UpperCAmelCase__ ) if isinstance(UpperCAmelCase__, UpperCAmelCase__ ): __lowercase = [header_text] * len(UpperCAmelCase__ ) __lowercase = [ render_header(UpperCAmelCase__, header_text[i], font_bytes=UpperCAmelCase__, font_path=UpperCAmelCase__ ) for i, image in enumerate(UpperCAmelCase__ ) ] if do_normalize: __lowercase = [self.normalize(image=UpperCAmelCase__ ) for image in images] # convert to torch tensor and permute __lowercase = [ self.extract_flattened_patches(image=UpperCAmelCase__, max_patches=UpperCAmelCase__, patch_size=UpperCAmelCase__ ) for image in images ] # create attention mask in numpy __lowercase = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images] __lowercase = BatchFeature( data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=UpperCAmelCase__ ) return encoded_outputs
17
'''simple docstring''' import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowercase : Union[str, Any] = { 'text_branch': 'text_model', 'audio_branch': 'audio_model.audio_encoder', 'attn': 'attention.self', 'self.proj': 'output.dense', 'attention.self_mask': 'attn_mask', 'mlp.fc1': 'intermediate.dense', 'mlp.fc2': 'output.dense', 'norm1': 'layernorm_before', 'norm2': 'layernorm_after', 'bn0': 'batch_norm', } lowercase : Tuple = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc') def lowerCAmelCase_ ( snake_case__ , snake_case__=False ): '''simple docstring''' A, A : Tuple = create_model( '''HTSAT-tiny''' , '''roberta''' , snake_case__ , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=snake_case__ , fusion_type='''aff_2d''' if enable_fusion else None , ) return model, model_cfg def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Dict = {} A : str = R'''.*sequential.(\d+).*''' A : Union[str, Any] = R'''.*_projection.(\d+).*''' for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: A : Any = key.replace(snake_case__ , snake_case__ ) if re.match(snake_case__ , snake_case__ ): # replace sequential layers with list A : Any = re.match(snake_case__ , snake_case__ ).group(1 ) A : List[str] = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(snake_case__ )//3}.linear.' ) elif re.match(snake_case__ , snake_case__ ): A : Union[str, Any] = int(re.match(snake_case__ , snake_case__ ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... A : str = 1 if projecton_layer == 0 else 2 A : Optional[Any] = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' ) if "audio" and "qkv" in key: # split qkv into query key and value A : int = value A : List[Any] = mixed_qkv.size(0 ) // 3 A : Union[str, Any] = mixed_qkv[:qkv_dim] A : Optional[int] = mixed_qkv[qkv_dim : qkv_dim * 2] A : Optional[int] = mixed_qkv[qkv_dim * 2 :] A : Tuple = query_layer A : Union[str, Any] = key_layer A : Optional[int] = value_layer else: A : Dict = value return model_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' A, A : int = init_clap(snake_case__ , enable_fusion=snake_case__ ) clap_model.eval() A : str = clap_model.state_dict() A : Union[str, Any] = rename_state_dict(snake_case__ ) A : Tuple = ClapConfig() A : str = enable_fusion A : str = ClapModel(snake_case__ ) # ignore the spectrogram embedding layer model.load_state_dict(snake_case__ , strict=snake_case__ ) model.save_pretrained(snake_case__ ) transformers_config.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase : List[str] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not') lowercase : Tuple = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
3
0
from math import factorial class a__ : def __init__( self : List[str],_A : Tuple,_A : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = real if isinstance(_A,_A ): SCREAMING_SNAKE_CASE_ : Tuple = [1] * rank else: SCREAMING_SNAKE_CASE_ : Optional[Any] = rank def __repr__( self : List[Any] ): """simple docstring""" return ( F'{self.real}+' F'{"+".join(str(_A )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}' ) def __UpperCamelCase ( self : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = self.duals.copy() while cur[-1] == 0: cur.pop(-1 ) return Dual(self.real,_A ) def __add__( self : Any,_A : List[Any] ): """simple docstring""" if not isinstance(_A,_A ): return Dual(self.real + other,self.duals ) SCREAMING_SNAKE_CASE_ : str = self.duals.copy() SCREAMING_SNAKE_CASE_ : Union[str, Any] = other.duals.copy() if len(_A ) > len(_A ): o_dual.extend([1] * (len(_A ) - len(_A )) ) elif len(_A ) < len(_A ): s_dual.extend([1] * (len(_A ) - len(_A )) ) SCREAMING_SNAKE_CASE_ : int = [] for i in range(len(_A ) ): new_duals.append(s_dual[i] + o_dual[i] ) return Dual(self.real + other.real,_A ) A = __add__ def __sub__( self : Any,_A : Union[str, Any] ): """simple docstring""" return self + other * -1 def __mul__( self : Tuple,_A : Any ): """simple docstring""" if not isinstance(_A,_A ): SCREAMING_SNAKE_CASE_ : int = [] for i in self.duals: new_duals.append(i * other ) return Dual(self.real * other,_A ) SCREAMING_SNAKE_CASE_ : str = [0] * (len(self.duals ) + len(other.duals ) + 1) for i, item in enumerate(self.duals ): for j, jtem in enumerate(other.duals ): new_duals[i + j + 1] += item * jtem for k in range(len(self.duals ) ): new_duals[k] += self.duals[k] * other.real for index in range(len(other.duals ) ): new_duals[index] += other.duals[index] * self.real return Dual(self.real * other.real,_A ) A = __mul__ def __truediv__( self : Optional[Any],_A : Dict ): """simple docstring""" if not isinstance(_A,_A ): SCREAMING_SNAKE_CASE_ : Dict = [] for i in self.duals: new_duals.append(i / other ) return Dual(self.real / other,_A ) raise ValueError def __floordiv__( self : List[str],_A : Any ): """simple docstring""" if not isinstance(_A,_A ): SCREAMING_SNAKE_CASE_ : Optional[Any] = [] for i in self.duals: new_duals.append(i // other ) return Dual(self.real // other,_A ) raise ValueError def __pow__( self : Optional[Any],_A : Any ): """simple docstring""" if n < 0 or isinstance(_A,_A ): raise ValueError("power must be a positive integer" ) if n == 0: return 1 if n == 1: return self SCREAMING_SNAKE_CASE_ : Union[str, Any] = self for _ in range(n - 1 ): x *= self return x def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : List[str] , lowerCAmelCase : Any ): """simple docstring""" if not callable(lowerCAmelCase ): raise ValueError("differentiate() requires a function as input for func" ) if not isinstance(lowerCAmelCase , (float, int) ): raise ValueError("differentiate() requires a float as input for position" ) if not isinstance(lowerCAmelCase , lowerCAmelCase ): raise ValueError("differentiate() requires an int as input for order" ) SCREAMING_SNAKE_CASE_ : Tuple = Dual(lowerCAmelCase , 1 ) SCREAMING_SNAKE_CASE_ : Dict = func(lowerCAmelCase ) if order == 0: return result.real return result.duals[order - 1] * factorial(lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() def _snake_case ( lowerCAmelCase : Any ): """simple docstring""" return y**2 * y**4 print(differentiate(f, 9, 2))
18
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging lowercase : Dict = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise if not is_sharded: A : Union[str, Any] = os.path.abspath(snake_case__ ) logger.info(F'Loading PyTorch weights from {pt_path}' ) A : Any = torch.load(snake_case__ , map_location='''cpu''' ) logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' ) A : List[str] = convert_pytorch_state_dict_to_flax(snake_case__ , snake_case__ ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files A : Any = convert_pytorch_sharded_state_dict_to_flax(snake_case__ , snake_case__ ) return flax_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' def is_key_or_prefix_key_in_dict(snake_case__ ) -> bool: return len(set(snake_case__ ) & {key, (model_prefix,) + key} ) > 0 # layer norm A : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean A : Tuple = pt_tuple_key[:-1] + ('''mean''',) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var A : Dict = pt_tuple_key[:-1] + ('''var''',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # embedding A : Any = pt_tuple_key[:-1] + ('''embedding''',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # conv layer A : Optional[Any] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(snake_case__ ): A : List[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer A : Optional[int] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(snake_case__ ): A : str = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight A : Dict = pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias A : List[Any] = pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 A : Dict = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): A : List[Any] = pt_tuple_key[-2] + '''_g''' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): A : List[str] = pt_tuple_key[-2] + '''_v''' if name is not None: A : int = pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = {k: v.numpy() for k, v in pt_state_dict.items()} A : int = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: A : List[str] = flax_model.params['''params'''] else: A : Dict = flax_model.params A : List[Any] = flatten_dict(snake_case__ ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : List[str] = flatten_dict(flax_model.params['''batch_stats'''] ) random_flax_state_dict.update(snake_case__ ) A : int = {} A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : int = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : str = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : Union[str, Any] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Any = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Dict = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : Any = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: A : Tuple = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : List[str] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch # Load the index A : Union[str, Any] = {} for shard_file in shard_filenames: # load using msgpack utils A : List[str] = torch.load(snake_case__ ) A : int = {k: v.numpy() for k, v in pt_state_dict.items()} A : Tuple = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : Optional[int] = flax_model.params['''params'''] A : List[Any] = flatten_dict(snake_case__ ) random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) ) else: A : Dict = flax_model.params A : Tuple = flatten_dict(snake_case__ ) A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : List[str] = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : int = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : List[str] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Union[str, Any] = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Any = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : int = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue if "var" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = os.path.abspath(snake_case__ ) logger.info(F'Loading Flax weights from {flax_checkpoint_path}' ) # import correct flax class A : List[str] = getattr(snake_case__ , '''Flax''' + model.__class__.__name__ ) # load flax weight dict with open(snake_case__ , '''rb''' ) as state_f: try: A : int = from_bytes(snake_case__ , state_f.read() ) except UnpicklingError: raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights A : List[str] = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values() if any(snake_case__ ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) A : Optional[Any] = jax.tree_util.tree_map( lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ ) A : Union[str, Any] = flatten_dict(snake_case__ ) A : List[Any] = pt_model.state_dict() A : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) A : Tuple = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys A : int = [] A : Any = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): A : Union[str, Any] = flax_key_tuple[0] == pt_model.base_model_prefix A : int = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: A : List[str] = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: A : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case__ ) not in pt_model_dict: # conv layer A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) A : Optional[int] = jnp.transpose(snake_case__ , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ) not in pt_model_dict: # linear layer A : Tuple = flax_key_tuple[:-1] + ('''weight''',) A : Tuple = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_mean''',) elif "var" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_var''',) if "batch_stats" in flax_state: A : List[Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: A : Union[str, Any] = '''.'''.join(snake_case__ ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. A : int = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: A : Optional[int] = key.split('''.''' ) A : Dict = None if key_components[-3::2] == ["parametrizations", "original0"]: A : List[str] = key_components[-2] + '''_g''' elif key_components[-3::2] == ["parametrizations", "original1"]: A : List[Any] = key_components[-2] + '''_v''' if name is not None: A : str = key_components[:-3] + [name] A : Optional[Any] = '''.'''.join(snake_case__ ) A : Optional[Any] = key if flax_key in special_pt_names: A : Optional[Any] = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ' F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) else: # add weight to pytorch dict A : Dict = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor A : Dict = torch.from_numpy(snake_case__ ) # remove from missing keys missing_keys.remove(snake_case__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(snake_case__ ) pt_model.load_state_dict(snake_case__ ) # re-transform missing_keys to list A : List[Any] = list(snake_case__ ) if len(snake_case__ ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) else: logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' ) if len(snake_case__ ) > 0: logger.warning( F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' ''' use it for predictions and inference.''' ) else: logger.warning( F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n' '''If your task is similar to the task the model of the checkpoint was trained on, ''' F'you can already use {pt_model.__class__.__name__} for predictions without further training.' ) return pt_model
3
0
import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( "files" , [ ["full:README.md", "dataset_infos.json"], ["empty:README.md", "dataset_infos.json"], ["dataset_infos.json"], ["full:README.md"], ] , ) def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ): lowerCamelCase_ = tmp_path_factory.mktemp("dset_infos_dir" ) if "full:README.md" in files: with open(dataset_infos_dir / "README.md" , "w" ) as f: f.write("---\ndataset_info:\n dataset_size: 42\n---" ) if "empty:README.md" in files: with open(dataset_infos_dir / "README.md" , "w" ) as f: f.write("" ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / "dataset_infos.json" , "w" ) as f: f.write("{\"default\": {\"dataset_size\": 42}}" ) lowerCamelCase_ = DatasetInfosDict.from_directory(lowerCamelCase__ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 4_2 @pytest.mark.parametrize( "dataset_info" , [ DatasetInfo(), DatasetInfo( description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=4_2 , ), ] , ) def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ): lowerCamelCase_ = str(lowerCamelCase__ ) dataset_info.write_to_directory(lowerCamelCase__ ) lowerCamelCase_ = DatasetInfo.from_directory(lowerCamelCase__ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(lowerCamelCase__ , "dataset_info.json" ) ) def lowerCamelCase_ ( ): lowerCamelCase_ = DatasetInfo( description="foo" , citation="bar" , homepage="https://foo.bar" , license="CC0" , features=Features({"a": Value("int32" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train", "num_examples": 4_2}] , download_checksums={} , download_size=1_3_3_7 , post_processing_size=4_4_2 , dataset_size=1_2_3_4 , size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4 , ) lowerCamelCase_ = dataset_info._to_yaml_dict() assert sorted(lowerCamelCase__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) lowerCamelCase_ = yaml.safe_dump(lowerCamelCase__ ) lowerCamelCase_ = yaml.safe_load(lowerCamelCase__ ) assert dataset_info_yaml_dict == reloaded def lowerCamelCase_ ( ): lowerCamelCase_ = DatasetInfo() lowerCamelCase_ = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( "dataset_infos_dict" , [ DatasetInfosDict(), DatasetInfosDict({"default": DatasetInfo()} ), DatasetInfosDict({"my_config_name": DatasetInfo()} ), DatasetInfosDict( { "default": DatasetInfo( description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=4_2 , ) } ), DatasetInfosDict( { "v1": DatasetInfo(dataset_size=4_2 ), "v2": DatasetInfo(dataset_size=1_3_3_7 ), } ), ] , ) def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ): lowerCamelCase_ = str(lowerCamelCase__ ) dataset_infos_dict.write_to_directory(lowerCamelCase__ ) lowerCamelCase_ = DatasetInfosDict.from_directory(lowerCamelCase__ ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): lowerCamelCase_ = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml lowerCamelCase_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(lowerCamelCase__ , "README.md" ) )
19
'''simple docstring''' import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint lowercase : Optional[int] = { '169M': 12, '430M': 24, '1B5': 24, '3B': 32, '7B': 32, '14B': 40, } lowercase : Optional[Any] = { '169M': 7_68, '430M': 10_24, '1B5': 20_48, '3B': 25_60, '7B': 40_96, '14B': 51_20, } def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Optional[Any] = list(state_dict.keys() ) for name in state_dict_keys: A : str = state_dict.pop(snake_case__ ) # emb -> embedding if name.startswith('''emb.''' ): A : Optional[Any] = name.replace('''emb.''' , '''embeddings.''' ) # ln_0 -> pre_ln (only present at block 0) if name.startswith('''blocks.0.ln0''' ): A : Union[str, Any] = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' ) # att -> attention A : int = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , snake_case__ ) # ffn -> feed_forward A : List[Any] = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , snake_case__ ) # time_mix_k -> time_mix_key and reshape if name.endswith('''.time_mix_k''' ): A : List[str] = name.replace('''.time_mix_k''' , '''.time_mix_key''' ) # time_mix_v -> time_mix_value and reshape if name.endswith('''.time_mix_v''' ): A : Union[str, Any] = name.replace('''.time_mix_v''' , '''.time_mix_value''' ) # time_mix_r -> time_mix_key and reshape if name.endswith('''.time_mix_r''' ): A : Union[str, Any] = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' ) if name != "head.weight": A : List[Any] = '''rwkv.''' + name A : Dict = weight return state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=False , snake_case__=None ): '''simple docstring''' if tokenizer_file is None: print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' ) A : int = 5_0277 A : Optional[int] = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' ) else: A : str = PreTrainedTokenizerFast(tokenizer_file=snake_case__ ) A : Any = len(snake_case__ ) tokenizer.save_pretrained(snake_case__ ) # 2. Build the config A : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: A : List[str] = candidate break if size is None: raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' ) if size not in possible_sizes: raise ValueError(F'`size` should be one of {possible_sizes}, got {size}.' ) A : Any = RwkvConfig( vocab_size=snake_case__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(snake_case__ ) # 3. Download model file then convert state_dict A : Union[str, Any] = hf_hub_download(snake_case__ , snake_case__ ) A : Tuple = torch.load(snake_case__ , map_location='''cpu''' ) A : List[Any] = convert_state_dict(snake_case__ ) # 4. Split in shards and save A, A : List[str] = shard_checkpoint(snake_case__ ) for shard_file, shard in shards.items(): torch.save(snake_case__ , os.path.join(snake_case__ , snake_case__ ) ) if index is not None: A : Dict = os.path.join(snake_case__ , snake_case__ ) # Save the index as well with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: A : List[Any] = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '''\n''' f.write(snake_case__ ) # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( '''Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.''' ) A : List[Any] = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: A : Union[str, Any] = torch.load(os.path.join(snake_case__ , snake_case__ ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(snake_case__ , snake_case__ ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' ) A : int = AutoModelForCausalLM.from_pretrained(snake_case__ ) model.push_to_hub(snake_case__ , max_shard_size='''2GB''' ) tokenizer.push_to_hub(snake_case__ ) if __name__ == "__main__": lowercase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.' ) parser.add_argument( '--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.' ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='Where to save the converted model.' ) parser.add_argument( '--tokenizer_file', default=None, type=str, help='Path to the tokenizer file to use (if not provided, only the model is converted).', ) parser.add_argument( '--size', default=None, type=str, help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Push to the Hub the converted model.', ) parser.add_argument( '--model_name', default=None, type=str, help='Name of the pushed model on the Hub, including the username / organization.', ) lowercase : Union[str, Any] = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
3
0
import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]: if isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ): return image elif isinstance(SCREAMING_SNAKE_CASE__ , PIL.Image.Image ): lowercase : Tuple = [image] if isinstance(image[0] , PIL.Image.Image ): lowercase : List[str] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image] lowercase : Optional[Any] = np.concatenate(SCREAMING_SNAKE_CASE__ , axis=0 ) lowercase : Optional[int] = np.array(SCREAMING_SNAKE_CASE__ ).astype(np.floataa ) / 255.0 lowercase : Tuple = image.transpose(0 , 3 , 1 , 2 ) lowercase : Dict = 2.0 * image - 1.0 lowercase : Optional[Any] = torch.from_numpy(SCREAMING_SNAKE_CASE__ ) elif isinstance(image[0] , torch.Tensor ): lowercase : Any = torch.cat(SCREAMING_SNAKE_CASE__ , dim=0 ) return image def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0.9995 ) -> Any: if not isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray ): lowercase : Optional[Any] = True lowercase : Any = va.device lowercase : Tuple = va.cpu().numpy() lowercase : Dict = va.cpu().numpy() lowercase : Any = np.sum(va * va / (np.linalg.norm(SCREAMING_SNAKE_CASE__ ) * np.linalg.norm(SCREAMING_SNAKE_CASE__ )) ) if np.abs(SCREAMING_SNAKE_CASE__ ) > DOT_THRESHOLD: lowercase : Any = (1 - t) * va + t * va else: lowercase : int = np.arccos(SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = np.sin(SCREAMING_SNAKE_CASE__ ) lowercase : str = theta_a * t lowercase : List[Any] = np.sin(SCREAMING_SNAKE_CASE__ ) lowercase : int = np.sin(theta_a - theta_t ) / sin_theta_a lowercase : int = sin_theta_t / sin_theta_a lowercase : Dict = sa * va + sa * va if inputs_are_torch: lowercase : Optional[int] = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ) return va def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int: lowercase : Union[str, Any] = F.normalize(SCREAMING_SNAKE_CASE__ , dim=-1 ) lowercase : List[Any] = F.normalize(SCREAMING_SNAKE_CASE__ , dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: for param in model.parameters(): lowercase : List[str] = value class __snake_case ( lowerCAmelCase ): def __init__( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case=None ,snake_case=None ,snake_case=None ,): '''simple docstring''' super().__init__() self.register_modules( vae=snake_case ,text_encoder=snake_case ,clip_model=snake_case ,tokenizer=snake_case ,unet=snake_case ,scheduler=snake_case ,feature_extractor=snake_case ,coca_model=snake_case ,coca_tokenizer=snake_case ,coca_transform=snake_case ,) lowercase : Optional[int] = ( feature_extractor.size if isinstance(feature_extractor.size ,snake_case ) else feature_extractor.size["""shortest_edge"""] ) lowercase : Dict = transforms.Normalize(mean=feature_extractor.image_mean ,std=feature_extractor.image_std ) set_requires_grad(self.text_encoder ,snake_case ) set_requires_grad(self.clip_model ,snake_case ) def _SCREAMING_SNAKE_CASE ( self ,snake_case = "auto" ): '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory lowercase : List[Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(snake_case ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' self.enable_attention_slicing(snake_case ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' set_requires_grad(self.vae ,snake_case ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' set_requires_grad(self.vae ,snake_case ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' set_requires_grad(self.unet ,snake_case ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' set_requires_grad(self.unet ,snake_case ) def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ): '''simple docstring''' lowercase : Optional[int] = min(int(num_inference_steps * strength ) ,snake_case ) lowercase : List[Any] = max(num_inference_steps - init_timestep ,0 ) lowercase : int = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case=None ): '''simple docstring''' if not isinstance(snake_case ,torch.Tensor ): raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(snake_case )}" ) lowercase : List[str] = image.to(device=snake_case ,dtype=snake_case ) if isinstance(snake_case ,snake_case ): lowercase : Optional[int] = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case ) ] lowercase : Tuple = torch.cat(snake_case ,dim=0 ) else: lowercase : List[str] = self.vae.encode(snake_case ).latent_dist.sample(snake_case ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor lowercase : Any = 0.18_215 * init_latents lowercase : Dict = init_latents.repeat_interleave(snake_case ,dim=0 ) lowercase : List[str] = randn_tensor(init_latents.shape ,generator=snake_case ,device=snake_case ,dtype=snake_case ) # get latents lowercase : Optional[int] = self.scheduler.add_noise(snake_case ,snake_case ,snake_case ) lowercase : Optional[Any] = init_latents return latents def _SCREAMING_SNAKE_CASE ( self ,snake_case ): '''simple docstring''' lowercase : Dict = self.coca_transform(snake_case ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): lowercase : List[Any] = self.coca_model.generate(transformed_image.to(device=self.device ,dtype=self.coca_model.dtype ) ) lowercase : int = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" ,"""""" ).rstrip(""" .,""" ) def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ): '''simple docstring''' lowercase : Optional[int] = self.feature_extractor.preprocess(snake_case ) lowercase : Optional[int] = torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half() lowercase : List[Any] = self.clip_model.get_image_features(snake_case ) lowercase : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 ,dim=-1 ,keepdim=snake_case ) lowercase : Tuple = image_embeddings_clip.repeat_interleave(snake_case ,dim=0 ) return image_embeddings_clip @torch.enable_grad() def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,): '''simple docstring''' lowercase : Optional[int] = latents.detach().requires_grad_() lowercase : Optional[int] = self.scheduler.scale_model_input(snake_case ,snake_case ) # predict the noise residual lowercase : Optional[int] = self.unet(snake_case ,snake_case ,encoder_hidden_states=snake_case ).sample if isinstance(self.scheduler ,(PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): lowercase : Optional[int] = self.scheduler.alphas_cumprod[timestep] lowercase : Union[str, Any] = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf lowercase : Tuple = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 lowercase : int = torch.sqrt(snake_case ) lowercase : Any = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler ,snake_case ): lowercase : Dict = self.scheduler.sigmas[index] lowercase : Tuple = latents - sigma * noise_pred else: raise ValueError(f"scheduler type {type(self.scheduler )} not supported" ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor lowercase : Optional[Any] = 1 / 0.18_215 * sample lowercase : Union[str, Any] = self.vae.decode(snake_case ).sample lowercase : str = (image / 2 + 0.5).clamp(0 ,1 ) lowercase : int = transforms.Resize(self.feature_extractor_size )(snake_case ) lowercase : Tuple = self.normalize(snake_case ).to(latents.dtype ) lowercase : Tuple = self.clip_model.get_image_features(snake_case ) lowercase : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 ,dim=-1 ,keepdim=snake_case ) lowercase : str = spherical_dist_loss(snake_case ,snake_case ).mean() * clip_guidance_scale lowercase : List[Any] = -torch.autograd.grad(snake_case ,snake_case )[0] if isinstance(self.scheduler ,snake_case ): lowercase : Any = latents.detach() + grads * (sigma**2) lowercase : Optional[Any] = noise_pred_original else: lowercase : int = noise_pred_original - torch.sqrt(snake_case ) * grads return noise_pred, latents @torch.no_grad() def __call__( self ,snake_case ,snake_case ,snake_case = None ,snake_case = None ,snake_case = 512 ,snake_case = 512 ,snake_case = 0.6 ,snake_case = 50 ,snake_case = 7.5 ,snake_case = 1 ,snake_case = 0.0 ,snake_case = 100 ,snake_case = None ,snake_case = "pil" ,snake_case = True ,snake_case = 0.8 ,snake_case = 0.1 ,snake_case = 0.1 ,): '''simple docstring''' if isinstance(snake_case ,snake_case ) and len(snake_case ) != batch_size: raise ValueError(f"You have passed {batch_size} batch_size, but only {len(snake_case )} generators." ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." ) if isinstance(snake_case ,torch.Generator ) and batch_size > 1: lowercase : str = [generator] + [None] * (batch_size - 1) lowercase : Union[str, Any] = [ ("""model""", self.coca_model is None), ("""tokenizer""", self.coca_tokenizer is None), ("""transform""", self.coca_transform is None), ] lowercase : Optional[int] = [x[0] for x in coca_is_none if x[1]] lowercase : int = """, """.join(snake_case ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(snake_case ): raise ValueError( f"Content prompt is None and CoCa [{coca_is_none_str}] is None." f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." ) lowercase : Optional[int] = self.get_image_description(snake_case ) if style_prompt is None: if len(snake_case ): raise ValueError( f"Style prompt is None and CoCa [{coca_is_none_str}] is None." f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." ) lowercase : str = self.get_image_description(snake_case ) # get prompt text embeddings for content and style lowercase : List[Any] = self.tokenizer( snake_case ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,truncation=snake_case ,return_tensors="""pt""" ,) lowercase : List[Any] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] lowercase : Optional[int] = self.tokenizer( snake_case ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,truncation=snake_case ,return_tensors="""pt""" ,) lowercase : Tuple = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] lowercase : Optional[Any] = slerp(snake_case ,snake_case ,snake_case ) # duplicate text embeddings for each generation per prompt lowercase : str = text_embeddings.repeat_interleave(snake_case ,dim=0 ) # set timesteps lowercase : str = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) lowercase : Tuple = {} if accepts_offset: lowercase : int = 1 self.scheduler.set_timesteps(snake_case ,**snake_case ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) lowercase , lowercase : Optional[int] = self.get_timesteps(snake_case ,snake_case ,self.device ) lowercase : Tuple = timesteps[:1].repeat(snake_case ) # Preprocess image lowercase : str = preprocess(snake_case ,snake_case ,snake_case ) lowercase : int = self.prepare_latents( snake_case ,snake_case ,snake_case ,text_embeddings.dtype ,self.device ,snake_case ) lowercase : List[Any] = preprocess(snake_case ,snake_case ,snake_case ) lowercase : Tuple = self.prepare_latents( snake_case ,snake_case ,snake_case ,text_embeddings.dtype ,self.device ,snake_case ) lowercase : List[str] = slerp(snake_case ,snake_case ,snake_case ) if clip_guidance_scale > 0: lowercase : Union[str, Any] = self.get_clip_image_embeddings(snake_case ,snake_case ) lowercase : Optional[int] = self.get_clip_image_embeddings(snake_case ,snake_case ) lowercase : Optional[int] = slerp( snake_case ,snake_case ,snake_case ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. lowercase : str = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: lowercase : List[str] = content_text_input.input_ids.shape[-1] lowercase : Optional[Any] = self.tokenizer([""""""] ,padding="""max_length""" ,max_length=snake_case ,return_tensors="""pt""" ) lowercase : Any = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt lowercase : Any = uncond_embeddings.repeat_interleave(snake_case ,dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes lowercase : List[str] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. lowercase : Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8) lowercase : Tuple = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps lowercase : str = torch.randn(snake_case ,generator=snake_case ,device="""cpu""" ,dtype=snake_case ).to( self.device ) else: lowercase : Optional[int] = torch.randn(snake_case ,generator=snake_case ,device=self.device ,dtype=snake_case ) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" ) lowercase : Any = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler lowercase : Dict = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] lowercase : Union[str, Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) lowercase : str = {} if accepts_eta: lowercase : str = eta # check if the scheduler accepts generator lowercase : Optional[Any] = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: lowercase : Tuple = generator with self.progress_bar(total=snake_case ): for i, t in enumerate(snake_case ): # expand the latents if we are doing classifier free guidance lowercase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents lowercase : Optional[int] = self.scheduler.scale_model_input(snake_case ,snake_case ) # predict the noise residual lowercase : Dict = self.unet(snake_case ,snake_case ,encoder_hidden_states=snake_case ).sample # perform classifier free guidance if do_classifier_free_guidance: lowercase , lowercase : str = noise_pred.chunk(2 ) lowercase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: lowercase : int = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) lowercase , lowercase : Union[str, Any] = self.cond_fn( snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,snake_case ,) # compute the previous noisy sample x_t -> x_t-1 lowercase : Any = self.scheduler.step(snake_case ,snake_case ,snake_case ,**snake_case ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor lowercase : Optional[Any] = 1 / 0.18_215 * latents lowercase : Any = self.vae.decode(snake_case ).sample lowercase : Optional[Any] = (image / 2 + 0.5).clamp(0 ,1 ) lowercase : Optional[Any] = image.cpu().permute(0 ,2 ,3 ,1 ).numpy() if output_type == "pil": lowercase : List[str] = self.numpy_to_pil(snake_case ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=snake_case ,nsfw_content_detected=snake_case )
20
'''simple docstring''' from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING lowercase : str = logging.get_logger(__name__) @add_end_docstrings(__snake_case ) class A ( __snake_case ): def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) self.check_model_type(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A, A : Dict = {}, {} if padding is not None: A : List[str] = padding if truncation is not None: A : Dict = truncation if top_k is not None: A : Optional[Any] = top_k return preprocess_params, {}, postprocess_params def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE , (Image.Image, str) ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : int = {'''image''': image, '''question''': question} else: A : Any = image A : Any = super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) return results def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" A : Union[str, Any] = load_image(inputs['''image'''] ) A : Optional[Any] = self.tokenizer( inputs['''question'''] , return_tensors=self.framework , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE ) A : Dict = self.image_processor(images=SCREAMING_SNAKE_CASE , return_tensors=self.framework ) model_inputs.update(SCREAMING_SNAKE_CASE ) return model_inputs def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : List[Any] = self.model(**SCREAMING_SNAKE_CASE ) return model_outputs def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=5 ) -> int: """simple docstring""" if top_k > self.model.config.num_labels: A : Dict = self.model.config.num_labels if self.framework == "pt": A : Optional[int] = model_outputs.logits.sigmoid()[0] A, A : int = probs.topk(SCREAMING_SNAKE_CASE ) else: raise ValueError(F'Unsupported framework: {self.framework}' ) A : int = scores.tolist() A : List[str] = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )]
3
0
def UpperCamelCase_( lowerCamelCase_ ) -> bool: _lowercase : str = (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def UpperCamelCase_( lowerCamelCase_ = 5000 ) -> int: _lowercase : Optional[Any] = [(i * (3 * i - 1)) // 2 for i in range(1 , lowerCamelCase_ )] for i, pentagonal_i in enumerate(lowerCamelCase_ ): for j in range(lowerCamelCase_ , len(lowerCamelCase_ ) ): _lowercase : List[Any] = pentagonal_nums[j] _lowercase : Optional[int] = pentagonal_i + pentagonal_j _lowercase : str = pentagonal_j - pentagonal_i if is_pentagonal(lowerCamelCase_ ) and is_pentagonal(lowerCamelCase_ ): return b return -1 if __name__ == "__main__": print(F"{solution() = }")
21
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase : Union[str, Any] = logging.get_logger(__name__) lowercase : str = { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json', 'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json', 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json', 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json', 'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json', 'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json', 'cl-tohoku/bert-base-japanese-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json' ), 'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json', # See all BERT models at https://huggingface.co/models?filter=bert } class A ( __snake_case ): __magic_name__ = '''bert''' def __init__( self , SCREAMING_SNAKE_CASE=30522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Optional[int]: """simple docstring""" super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = vocab_size A : Optional[Any] = hidden_size A : List[Any] = num_hidden_layers A : List[str] = num_attention_heads A : Dict = hidden_act A : Optional[Any] = intermediate_size A : List[Any] = hidden_dropout_prob A : List[Any] = attention_probs_dropout_prob A : Optional[Any] = max_position_embeddings A : List[str] = type_vocab_size A : Dict = initializer_range A : str = layer_norm_eps A : int = position_embedding_type A : Dict = use_cache A : str = classifier_dropout class A ( __snake_case ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": A : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: A : Optional[int] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
3
0
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING __SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE :Dict = { '''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''', } class A_ ( lowerCAmelCase_ ): _lowerCamelCase : str = """deta""" _lowerCamelCase : List[str] = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self : Optional[Any] , snake_case_ : Dict=None , snake_case_ : int=9_0_0 , snake_case_ : Tuple=2_0_4_8 , snake_case_ : Union[str, Any]=6 , snake_case_ : Tuple=2_0_4_8 , snake_case_ : int=8 , snake_case_ : str=6 , snake_case_ : int=1_0_2_4 , snake_case_ : str=8 , snake_case_ : Tuple=0.0 , snake_case_ : int=True , snake_case_ : str="relu" , snake_case_ : Optional[int]=2_5_6 , snake_case_ : int=0.1 , snake_case_ : int=0.0 , snake_case_ : Dict=0.0 , snake_case_ : List[Any]=0.0_2 , snake_case_ : Optional[int]=1.0 , snake_case_ : Tuple=True , snake_case_ : Dict=False , snake_case_ : Any="sine" , snake_case_ : int=5 , snake_case_ : Union[str, Any]=4 , snake_case_ : List[Any]=4 , snake_case_ : Optional[int]=True , snake_case_ : Optional[Any]=3_0_0 , snake_case_ : str=True , snake_case_ : Optional[Any]=True , snake_case_ : Dict=1 , snake_case_ : Optional[int]=5 , snake_case_ : str=2 , snake_case_ : int=1 , snake_case_ : Union[str, Any]=1 , snake_case_ : Dict=5 , snake_case_ : Union[str, Any]=2 , snake_case_ : Optional[Any]=0.1 , snake_case_ : Optional[int]=0.2_5 , **snake_case_ : Optional[Any] , ): if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) _UpperCAmelCase = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] ) else: if isinstance(snake_case_ , snake_case_ ): _UpperCAmelCase = backbone_config.pop("model_type" ) _UpperCAmelCase = CONFIG_MAPPING[backbone_model_type] _UpperCAmelCase = config_class.from_dict(snake_case_ ) _UpperCAmelCase = backbone_config _UpperCAmelCase = num_queries _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = d_model _UpperCAmelCase = encoder_ffn_dim _UpperCAmelCase = encoder_layers _UpperCAmelCase = encoder_attention_heads _UpperCAmelCase = decoder_ffn_dim _UpperCAmelCase = decoder_layers _UpperCAmelCase = decoder_attention_heads _UpperCAmelCase = dropout _UpperCAmelCase = attention_dropout _UpperCAmelCase = activation_dropout _UpperCAmelCase = activation_function _UpperCAmelCase = init_std _UpperCAmelCase = init_xavier_std _UpperCAmelCase = encoder_layerdrop _UpperCAmelCase = auxiliary_loss _UpperCAmelCase = position_embedding_type # deformable attributes _UpperCAmelCase = num_feature_levels _UpperCAmelCase = encoder_n_points _UpperCAmelCase = decoder_n_points _UpperCAmelCase = two_stage _UpperCAmelCase = two_stage_num_proposals _UpperCAmelCase = with_box_refine _UpperCAmelCase = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError("If two_stage is True, with_box_refine must be True." ) # Hungarian matcher _UpperCAmelCase = class_cost _UpperCAmelCase = bbox_cost _UpperCAmelCase = giou_cost # Loss coefficients _UpperCAmelCase = mask_loss_coefficient _UpperCAmelCase = dice_loss_coefficient _UpperCAmelCase = bbox_loss_coefficient _UpperCAmelCase = giou_loss_coefficient _UpperCAmelCase = eos_coefficient _UpperCAmelCase = focal_alpha super().__init__(is_encoder_decoder=snake_case_ , **snake_case_ ) @property def lowercase ( self : List[Any] ): return self.encoder_attention_heads @property def lowercase ( self : Dict ): return self.d_model def lowercase ( self : List[Any] ): _UpperCAmelCase = copy.deepcopy(self.__dict__ ) _UpperCAmelCase = self.backbone_config.to_dict() _UpperCAmelCase = self.__class__.model_type return output
22
'''simple docstring''' import requests from bsa import BeautifulSoup def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : str = BeautifulSoup(requests.get(snake_case__ , params=snake_case__ ).content , '''html.parser''' ) A : Dict = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} ) A : Optional[int] = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' ) return anchors[2].get_text() if __name__ == "__main__": lowercase : str = { 'title': ( 'Precisely geometry controlled microsupercapacitors for ultrahigh areal ' 'capacitance, volumetric capacitance, and energy density' ), 'journal': 'Chem. Mater.', 'volume': 30, 'pages': '3979-3990', 'year': 20_18, 'hl': 'en', } print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
3
0
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType UpperCamelCase__: Any = logging.get_logger(__name__) UpperCamelCase__: Union[str, Any] = { "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json", } # fmt: off UpperCamelCase__: Optional[Any] = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 357, 366, 438, 532, 685, 705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377, 1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211, 4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786, 11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791, 17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409, 34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361 ] UpperCamelCase__: List[str] = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 359, 503, 522, 542, 873, 893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627, 3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647, 7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793, 14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675, 22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865, 42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362 ] class SCREAMING_SNAKE_CASE( A__ ): """simple docstring""" lowerCamelCase__ = """whisper""" lowerCamelCase__ = ["""past_key_values"""] lowerCamelCase__ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self : Dict , __snake_case : Tuple=51865 , __snake_case : Union[str, Any]=80 , __snake_case : str=6 , __snake_case : int=4 , __snake_case : Optional[Any]=6 , __snake_case : Tuple=4 , __snake_case : Optional[Any]=1536 , __snake_case : Tuple=1536 , __snake_case : Union[str, Any]=0.0 , __snake_case : List[str]=0.0 , __snake_case : Optional[int]=50257 , __snake_case : Dict=True , __snake_case : int=True , __snake_case : Optional[int]="gelu" , __snake_case : Tuple=256 , __snake_case : Any=0.0 , __snake_case : List[Any]=0.0 , __snake_case : str=0.0 , __snake_case : str=0.02 , __snake_case : List[str]=False , __snake_case : Any=1500 , __snake_case : List[Any]=448 , __snake_case : Any=50256 , __snake_case : List[Any]=50256 , __snake_case : Tuple=50256 , __snake_case : Optional[Any]=None , __snake_case : str=[220, 50256] , __snake_case : Tuple=False , __snake_case : Dict=256 , __snake_case : Tuple=False , __snake_case : Tuple=0.05 , __snake_case : int=10 , __snake_case : str=2 , __snake_case : Optional[Any]=0.0 , __snake_case : str=10 , __snake_case : Optional[int]=0 , __snake_case : Optional[int]=7 , **__snake_case : Optional[int] , ) -> List[str]: UpperCAmelCase : List[str] = vocab_size UpperCAmelCase : int = num_mel_bins UpperCAmelCase : Optional[int] = d_model UpperCAmelCase : str = encoder_layers UpperCAmelCase : Tuple = encoder_attention_heads UpperCAmelCase : Optional[Any] = decoder_layers UpperCAmelCase : List[str] = decoder_attention_heads UpperCAmelCase : int = decoder_ffn_dim UpperCAmelCase : List[str] = encoder_ffn_dim UpperCAmelCase : List[str] = dropout UpperCAmelCase : List[str] = attention_dropout UpperCAmelCase : Optional[int] = activation_dropout UpperCAmelCase : Optional[int] = activation_function UpperCAmelCase : str = init_std UpperCAmelCase : Union[str, Any] = encoder_layerdrop UpperCAmelCase : Dict = decoder_layerdrop UpperCAmelCase : Union[str, Any] = use_cache UpperCAmelCase : List[Any] = encoder_layers UpperCAmelCase : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True UpperCAmelCase : str = max_source_positions UpperCAmelCase : List[Any] = max_target_positions # Audio Classification-specific parameters. Feel free to ignore for other classes. UpperCAmelCase : str = classifier_proj_size UpperCAmelCase : Optional[int] = use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCAmelCase : str = apply_spec_augment UpperCAmelCase : List[Any] = mask_time_prob UpperCAmelCase : str = mask_time_length UpperCAmelCase : Union[str, Any] = mask_time_min_masks UpperCAmelCase : str = mask_feature_prob UpperCAmelCase : List[Any] = mask_feature_length UpperCAmelCase : List[str] = mask_feature_min_masks UpperCAmelCase : Dict = median_filter_width super().__init__( pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , decoder_start_token_id=__snake_case , suppress_tokens=__snake_case , begin_suppress_tokens=__snake_case , **__snake_case , ) class SCREAMING_SNAKE_CASE( A__ ): """simple docstring""" @property def A ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: UpperCAmelCase : Dict = OrderedDict( [ ('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}), ] ) if self.use_past: UpperCAmelCase : Any = {0: '''batch'''} else: UpperCAmelCase : List[Any] = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(__snake_case , direction='''inputs''' ) return common_inputs def A ( self : List[str] , __snake_case : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __snake_case : int = -1 , __snake_case : int = -1 , __snake_case : bool = False , __snake_case : Optional["TensorType"] = None , __snake_case : int = 22050 , __snake_case : float = 5.0 , __snake_case : int = 220 , ) -> Mapping[str, Any]: UpperCAmelCase : int = OrderedDict() UpperCAmelCase : List[str] = OnnxConfig.generate_dummy_inputs( self , preprocessor=preprocessor.feature_extractor , batch_size=__snake_case , framework=__snake_case , sampling_rate=__snake_case , time_duration=__snake_case , frequency=__snake_case , ) UpperCAmelCase : int = encoder_inputs['''input_features'''].shape[2] UpperCAmelCase : Optional[int] = encoder_sequence_length // 2 if self.use_past else seq_length UpperCAmelCase : Optional[Any] = super().generate_dummy_inputs( preprocessor.tokenizer , __snake_case , __snake_case , __snake_case , __snake_case ) UpperCAmelCase : Optional[int] = encoder_inputs.pop('''input_features''' ) UpperCAmelCase : int = decoder_inputs.pop('''decoder_input_ids''' ) if "past_key_values" in decoder_inputs: UpperCAmelCase : Union[str, Any] = decoder_inputs.pop('''past_key_values''' ) return dummy_inputs @property def A ( self : List[Any] ) -> float: return 1E-3
23
'''simple docstring''' class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Any = None A : Optional[Any] = None A : Tuple = graph self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Dict = len(SCREAMING_SNAKE_CASE ) A : Optional[Any] = None def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" if sources is int: A : Dict = [sources] if sinks is int: A : str = [sinks] if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0: return A : Optional[int] = sources[0] A : Union[str, Any] = sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1: A : Optional[int] = 0 for i in sources: max_input_flow += sum(self.graph[i] ) A : Dict = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: A : Dict = max_input_flow A : Tuple = 0 A : Tuple = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: A : Optional[Any] = max_input_flow A : Optional[Any] = size - 1 def __lowerCAmelCase ( self ) -> Any: """simple docstring""" if self.maximum_flow_algorithm is None: raise Exception('''You need to set maximum flow algorithm before.''' ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : List[Any] = algorithm(self ) class A : def __init__( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Union[str, Any] = flow_network A : Optional[Any] = flow_network.verticesCount A : Tuple = flow_network.sourceIndex A : Dict = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that A : str = flow_network.graph A : Optional[Any] = False def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" if not self.executed: self._algorithm() A : Optional[int] = True def __lowerCAmelCase ( self ) -> Any: """simple docstring""" pass class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE ) # use this to save your result A : List[str] = -1 def __lowerCAmelCase ( self ) -> str: """simple docstring""" if not self.executed: raise Exception('''You should execute algorithm before using its result!''' ) return self.maximum_flow class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE ) A : Optional[Any] = [[0] * self.verticies_count for i in range(self.verticies_count )] A : Union[str, Any] = [0] * self.verticies_count A : List[Any] = [0] * self.verticies_count def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Tuple = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule A : Optional[Any] = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list A : Union[str, Any] = 0 while i < len(SCREAMING_SNAKE_CASE ): A : str = vertices_list[i] A : List[str] = self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) ) A : int = 0 else: i += 1 A : Optional[Any] = sum(self.preflow[self.source_index] ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.relabel(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Dict = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : Dict = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): A : Dict = self.heights[to_index] if min_height is not None: A : Dict = min_height + 1 if __name__ == "__main__": lowercase : Optional[int] = [0] lowercase : List[Any] = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] lowercase : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network lowercase : List[str] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate lowercase : List[str] = flow_network.find_maximum_flow() print(f'''maximum flow is {maximum_flow}''')
3
0
import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy snake_case_ = logging.getLogger(__name__) def lowerCamelCase__ ( snake_case_ : torch.nn.Module , snake_case_ : BnbQuantizationConfig , snake_case_ : Union[str, os.PathLike] = None , snake_case_ : Optional[Dict[str, Union[int, str, torch.device]]] = None , snake_case_ : Optional[List[str]] = None , snake_case_ : Optional[Dict[Union[int, str], Union[int, str]]] = None , snake_case_ : Optional[Union[str, os.PathLike]] = None , snake_case_ : bool = False , ) -> Optional[Any]: __snake_case = bnb_quantization_config.load_in_abit __snake_case = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,''' ''' make sure you have the latest version of `bitsandbytes` installed.''' ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,''' '''make sure you have the latest version of `bitsandbytes` installed.''' ) __snake_case = [] # custom device map if isinstance(snake_case_ , snake_case_ ) and len(device_map.keys() ) > 1: __snake_case = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: __snake_case = get_keys_to_not_convert(snake_case_ ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(snake_case_ ) __snake_case = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: __snake_case = [] __snake_case = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(snake_case_ ) # compatibility with peft __snake_case = load_in_abit __snake_case = load_in_abit __snake_case = get_parameter_device(snake_case_ ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( '''It is not recommended to quantize a loaded model. ''' '''The model should be instantiated under the `init_empty_weights` context manager.''' ) __snake_case = replace_with_bnb_layers(snake_case_ , snake_case_ , modules_to_not_convert=snake_case_ ) # convert param to the right dtype __snake_case = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: __snake_case = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' ) __snake_case = getattr(snake_case_ , snake_case_ , snake_case_ ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(snake_case_ ): param.to(snake_case_ ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' ) logger.info( f"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" '''We move the model to cuda.''' ) return model elif weights_location is None: raise RuntimeError( f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): __snake_case = replace_with_bnb_layers( snake_case_ , snake_case_ , modules_to_not_convert=snake_case_ ) __snake_case = get_quantized_model_device_map( snake_case_ , snake_case_ , snake_case_ , max_memory=snake_case_ , no_split_module_classes=snake_case_ , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): __snake_case = True __snake_case = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] ) load_checkpoint_in_model( snake_case_ , snake_case_ , snake_case_ , dtype=bnb_quantization_config.torch_dtype , offload_folder=snake_case_ , offload_state_dict=snake_case_ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(snake_case_ , device_map=snake_case_ , offload_dir=snake_case_ ) def lowerCamelCase__ ( snake_case_ : List[str] , snake_case_ : Any , snake_case_ : List[Any]=None , snake_case_ : int=None , snake_case_ : Dict=None ) -> Any: if device_map is None: if torch.cuda.is_available(): __snake_case = {'''''': torch.cuda.current_device()} else: raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' ) logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' ) if isinstance(snake_case_ , snake_case_ ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( '''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or ''' '''\'sequential\'.''' ) __snake_case = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) __snake_case = {} __snake_case = special_dtypes __snake_case = no_split_module_classes __snake_case = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": __snake_case = get_balanced_memory( snake_case_ , low_zero=(device_map == '''balanced_low_0''') , max_memory=snake_case_ , **snake_case_ , ) __snake_case = max_memory __snake_case = infer_auto_device_map(snake_case_ , **snake_case_ ) if isinstance(snake_case_ , snake_case_ ): # check if don't have any quantized module on the cpu __snake_case = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules __snake_case = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( ''' Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. ''' ) else: logger.info( '''Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' ) del device_map_without_some_modules return device_map def lowerCamelCase__ ( snake_case_ : Any , snake_case_ : Any , snake_case_ : Union[str, Any]=None , snake_case_ : Any=None ) -> int: if modules_to_not_convert is None: __snake_case = [] __snake_case , __snake_case = _replace_with_bnb_layers( snake_case_ , snake_case_ , snake_case_ , snake_case_ ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def lowerCamelCase__ ( snake_case_ : List[str] , snake_case_ : Dict , snake_case_ : str=None , snake_case_ : int=None , ) -> Any: __snake_case = False for name, module in model.named_children(): if current_key_name is None: __snake_case = [] current_key_name.append(snake_case_ ) if isinstance(snake_case_ , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` __snake_case = '''.'''.join(snake_case_ ) __snake_case = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: __snake_case = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: __snake_case = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=snake_case_ , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: __snake_case = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' ) __snake_case = module.weight.data if module.bias is not None: __snake_case = module.bias.data bnb_module.requires_grad_(snake_case_ ) setattr(snake_case_ , snake_case_ , snake_case_ ) __snake_case = True if len(list(module.children() ) ) > 0: __snake_case , __snake_case = _replace_with_bnb_layers( snake_case_ , snake_case_ , snake_case_ , snake_case_ ) __snake_case = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def lowerCamelCase__ ( snake_case_ : int ) -> Dict: # Create a copy of the model with init_empty_weights(): __snake_case = deepcopy(snake_case_ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` __snake_case = find_tied_parameters(snake_case_ ) # For compatibility with Accelerate < 0.18 if isinstance(snake_case_ , snake_case_ ): __snake_case = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: __snake_case = sum(snake_case_ , [] ) __snake_case = len(snake_case_ ) > 0 # Check if it is a base model __snake_case = False if hasattr(snake_case_ , '''base_model_prefix''' ): __snake_case = not hasattr(snake_case_ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head __snake_case = list(model.named_children() ) __snake_case = [list_modules[-1][0]] # add last module together with tied weights __snake_case = set(snake_case_ ) - set(snake_case_ ) __snake_case = list(set(snake_case_ ) ) + list(snake_case_ ) # remove ".weight" from the keys __snake_case = ['''.weight''', '''.bias'''] __snake_case = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: __snake_case = name.replace(snake_case_ , '''''' ) filtered_module_names.append(snake_case_ ) return filtered_module_names def lowerCamelCase__ ( snake_case_ : str ) -> str: for m in model.modules(): if isinstance(snake_case_ , bnb.nn.Linearabit ): return True return False def lowerCamelCase__ ( snake_case_ : nn.Module ) -> Dict: return next(parameter.parameters() ).device def lowerCamelCase__ ( snake_case_ : Union[str, Any] , snake_case_ : str , snake_case_ : List[Any] , snake_case_ : int , snake_case_ : List[Any] , snake_case_ : str , snake_case_ : Dict ) -> List[str]: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(snake_case_ , snake_case_ , 0 , dtype=snake_case_ , value=snake_case_ ) __snake_case = param_name __snake_case = model if "." in tensor_name: __snake_case = tensor_name.split('''.''' ) for split in splits[:-1]: __snake_case = getattr(snake_case_ , snake_case_ ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) __snake_case = new_module __snake_case = splits[-1] # offload weights __snake_case = False offload_weight(module._parameters[tensor_name] , snake_case_ , snake_case_ , index=snake_case_ ) if hasattr(module._parameters[tensor_name] , '''SCB''' ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , snake_case_ , index=snake_case_ , ) else: offload_weight(snake_case_ , snake_case_ , snake_case_ , index=snake_case_ ) offload_weight(snake_case_ , param_name.replace('''weight''' , '''SCB''' ) , snake_case_ , index=snake_case_ ) set_module_tensor_to_device(snake_case_ , snake_case_ , '''meta''' , dtype=snake_case_ , value=torch.empty(*param.size() ) )
24
'''simple docstring''' def lowerCAmelCase_ ( snake_case__ = 10 ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ) or n < 0: raise ValueError('''Invalid input''' ) A : List[str] = 10**n A : Tuple = 2_8433 * (pow(2 , 783_0457 , snake_case__ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(f'''{solution(10) = }''')
3
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : Optional[Any] = '''bert-generation''' def __init__(self , SCREAMING_SNAKE_CASE__=5_03_58 , SCREAMING_SNAKE_CASE__=10_24 , SCREAMING_SNAKE_CASE__=24 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=40_96 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=5_12 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__="absolute" , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ) -> Optional[Any]: """simple docstring""" super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = vocab_size SCREAMING_SNAKE_CASE__ : Tuple = hidden_size SCREAMING_SNAKE_CASE__ : Tuple = num_hidden_layers SCREAMING_SNAKE_CASE__ : Any = num_attention_heads SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act SCREAMING_SNAKE_CASE__ : Any = intermediate_size SCREAMING_SNAKE_CASE__ : str = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings SCREAMING_SNAKE_CASE__ : Dict = initializer_range SCREAMING_SNAKE_CASE__ : int = layer_norm_eps SCREAMING_SNAKE_CASE__ : List[str] = position_embedding_type SCREAMING_SNAKE_CASE__ : Any = use_cache
25
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging lowercase : List[str] = logging.get_logger(__name__) lowercase : str = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class A ( __snake_case ): __magic_name__ = '''gpt_neo''' __magic_name__ = ['''past_key_values'''] __magic_name__ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , SCREAMING_SNAKE_CASE=50257 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=24 , SCREAMING_SNAKE_CASE=[[["global", "local"], 12]] , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=50256 , SCREAMING_SNAKE_CASE=50256 , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" A : Union[str, Any] = vocab_size A : Optional[Any] = max_position_embeddings A : Dict = hidden_size A : Optional[Any] = num_layers A : Tuple = num_heads A : int = intermediate_size A : Optional[Any] = window_size A : List[Any] = activation_function A : Union[str, Any] = resid_dropout A : Any = embed_dropout A : List[Any] = attention_dropout A : str = classifier_dropout A : List[Any] = layer_norm_epsilon A : str = initializer_range A : List[str] = use_cache A : Optional[int] = bos_token_id A : List[Any] = eos_token_id A : int = attention_types A : int = self.expand_attention_types_params(SCREAMING_SNAKE_CASE ) if len(self.attention_layers ) != self.num_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.attention_layers)` == `config.num_layers` ''' F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, ' F'`config.num_layers = {self.num_layers}`. ' '''`config.attention_layers` is prepared using `config.attention_types`. ''' '''Please verify the value of `config.attention_types` argument.''' ) super().__init__(bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : List[str] = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' import torch A : Tuple = input.size() A : Union[str, Any] = len(snake_case__ ) A : List[str] = shape[dimension] A : Union[str, Any] = torch.arange(0 , snake_case__ , snake_case__ ) A : List[str] = torch.div(sizedim - size , snake_case__ , rounding_mode='''floor''' ) + 1 A : Optional[int] = torch.arange(snake_case__ ) + low_indices[:min_length][:, None] A : str = [slice(snake_case__ )] * rank A : List[Any] = indices A : Union[str, Any] = input[s] A : List[str] = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch A : List[str] = torch.arange(1 , snake_case__ ) A : Optional[int] = torch.remainder(snake_case__ , snake_case__ ) A : Optional[int] = remainders == 0 A : Optional[Any] = candidates[divisor_indices] A : Optional[int] = torch.max(snake_case__ ) return largest_divisor, torch.div(snake_case__ , snake_case__ , rounding_mode='''floor''' ) class A ( __snake_case ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" A : Tuple = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE , direction='''inputs''' ) A : Optional[Any] = {0: '''batch''', 1: '''past_sequence + sequence'''} else: A : Dict = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return self._config.num_heads def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]: """simple docstring""" A : List[str] = super(SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs( SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE ) # We need to order the input in the way they appears in the forward() A : Any = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch A, A : Dict = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values A : str = seqlen + 2 A : List[Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) A : Any = [ (torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers ) ] A : str = common_inputs['''attention_mask'''] if self.use_past: A : Optional[int] = ordered_inputs['''attention_mask'''].dtype A : List[str] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )] , dim=1 ) return ordered_inputs @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return 13
3
0
def lowerCAmelCase_ ( snake_case_ ): _A : Tuple = [] _A : Union[str, Any] = [] _A : Optional[int] = { """^""": 3, """*""": 2, """/""": 2, """%""": 2, """+""": 1, """-""": 1, } # Priority of each operator _A : Dict = len(snake_case_ ) if (len(snake_case_ ) > 7) else 7 # Print table header for output print( """Symbol""".center(8 ),"""Stack""".center(snake_case_ ),"""Postfix""".center(snake_case_ ),sep=""" | """,) print("""-""" * (print_width * 3 + 7) ) for x in infix: if x.isalpha() or x.isdigit(): post_fix.append(snake_case_ ) # if x is Alphabet / Digit, add it to Postfix elif x == "(": stack.append(snake_case_ ) # if x is "(" push to Stack elif x == ")": # if x is ")" pop stack until "(" is encountered while stack[-1] != "(": post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix stack.pop() else: if len(snake_case_ ) == 0: stack.append(snake_case_ ) # If stack is empty, push x to stack else: # while priority of x is not > priority of element in the stack while len(snake_case_ ) > 0 and priority[x] <= priority[stack[-1]]: post_fix.append(stack.pop() ) # pop stack & add to Postfix stack.append(snake_case_ ) # push x to stack print( x.center(8 ),("""""".join(snake_case_ )).ljust(snake_case_ ),("""""".join(snake_case_ )).ljust(snake_case_ ),sep=""" | """,) # Output in tabular format while len(snake_case_ ) > 0: # while stack is not empty post_fix.append(stack.pop() ) # pop stack & add to Postfix print( """ """.center(8 ),("""""".join(snake_case_ )).ljust(snake_case_ ),("""""".join(snake_case_ )).ljust(snake_case_ ),sep=""" | """,) # Output in tabular format return "".join(snake_case_ ) # return Postfix as str def lowerCAmelCase_ ( snake_case_ ): _A : str = list(infix[::-1] ) # reverse the infix equation for i in range(len(snake_case_ ) ): if infix[i] == "(": _A : Any = """)""" # change "(" to ")" elif infix[i] == ")": _A : int = """(""" # change ")" to "(" return (infix_2_postfix("""""".join(snake_case_ ) ))[ ::-1 ] # call infix_2_postfix on Infix, return reverse of Postfix if __name__ == "__main__": _snake_case = input("\nEnter an Infix Equation = ") # Input an Infix equation _snake_case = "".join(Infix.split()) # Remove spaces from the input print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
26
'''simple docstring''' import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Union[str, Any] = [] A : Union[str, Any] = [] for i in range(self.num_layers ): A : Any = self.in_channels if i == 0 else self.out_channels A : Optional[Any] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Optional[int] = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnets A : Union[str, Any] = attentions if self.add_downsample: A : int = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Union[str, Any]: """simple docstring""" A : Optional[Any] = () for resnet, attn in zip(self.resnets , self.attentions ): A : int = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Dict = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[Any] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Optional[Any] = [] for i in range(self.num_layers ): A : Optional[Any] = self.in_channels if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets if self.add_downsample: A : Dict = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]: """simple docstring""" A : str = () for resnet in self.resnets: A : Optional[int] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[int] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[Any] = [] A : Optional[int] = [] for i in range(self.num_layers ): A : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : Dict = self.prev_output_channel if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : int = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets A : Optional[Any] = attentions if self.add_upsample: A : Optional[int] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[int]: """simple docstring""" for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states A : List[str] = res_hidden_states_tuple[-1] A : int = res_hidden_states_tuple[:-1] A : List[str] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Tuple = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : Dict = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : int = [] for i in range(self.num_layers ): A : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : List[str] = self.prev_output_channel if i == 0 else self.out_channels A : str = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[Any] = resnets if self.add_upsample: A : Optional[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Tuple: """simple docstring""" for resnet in self.resnets: # pop res hidden states A : Optional[int] = res_hidden_states_tuple[-1] A : Optional[Any] = res_hidden_states_tuple[:-1] A : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Optional[Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : List[str] = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : str = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] A : List[Any] = [] for _ in range(self.num_layers ): A : int = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[str] = resnets A : List[str] = attentions def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Dict: """simple docstring""" A : Optional[Any] = self.resnets[0](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): A : Optional[int] = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) return hidden_states
3
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __lowercase : Optional[Any] = logging.get_logger(__name__) __lowercase : Tuple = { 'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json', 'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json', } class __UpperCamelCase ( lowerCAmelCase_ ): A_ = "markuplm" def __init__( self , __a=3_0522 , __a=768 , __a=12 , __a=12 , __a=3072 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=2 , __a=0.02 , __a=1E-1_2 , __a=0 , __a=0 , __a=2 , __a=256 , __a=1024 , __a=216 , __a=1001 , __a=32 , __a=50 , __a="absolute" , __a=True , __a=None , **__a , ): '''simple docstring''' super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a , ) __a : List[str] = vocab_size __a : Optional[Any] = hidden_size __a : Optional[int] = num_hidden_layers __a : Optional[Any] = num_attention_heads __a : Optional[int] = hidden_act __a : Union[str, Any] = intermediate_size __a : str = hidden_dropout_prob __a : List[Any] = attention_probs_dropout_prob __a : Dict = max_position_embeddings __a : Optional[Any] = type_vocab_size __a : Union[str, Any] = initializer_range __a : Optional[Any] = layer_norm_eps __a : Optional[Any] = position_embedding_type __a : Optional[int] = use_cache __a : Optional[Any] = classifier_dropout # additional properties __a : int = max_depth __a : Union[str, Any] = max_xpath_tag_unit_embeddings __a : str = max_xpath_subs_unit_embeddings __a : Optional[Any] = tag_pad_id __a : Dict = subs_pad_id __a : List[str] = xpath_unit_hidden_size
27
'''simple docstring''' import os def lowerCAmelCase_ ( ): '''simple docstring''' A : List[Any] = os.path.join(os.path.dirname(snake_case__ ) , '''num.txt''' ) with open(snake_case__ ) as file_hand: return str(sum(int(snake_case__ ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
3
0
'''simple docstring''' import enum import os from hashlib import shaaaa from typing import Optional from .. import config from .logging import get_logger _lowerCamelCase : Dict = get_logger(__name__) class SCREAMING_SNAKE_CASE ( enum.Enum ): """simple docstring""" _SCREAMING_SNAKE_CASE = """all_checks""" _SCREAMING_SNAKE_CASE = """basic_checks""" _SCREAMING_SNAKE_CASE = """no_checks""" class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" def __lowerCamelCase ( A__ , A__ , A__=None ) -> List[Any]: """simple docstring""" if expected_checksums is None: logger.info('Unable to verify checksums.' ) return if len(set(A__ ) - set(A__ ) ) > 0: raise ExpectedMoreDownloadedFiles(str(set(A__ ) - set(A__ ) ) ) if len(set(A__ ) - set(A__ ) ) > 0: raise UnexpectedDownloadedFile(str(set(A__ ) - set(A__ ) ) ) UpperCamelCase = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]] UpperCamelCase = ' for ' + verification_name if verification_name is not None else '' if len(A__ ) > 0: raise NonMatchingChecksumError( F"""Checksums didn't match{for_verification_name}:\n""" F"""{bad_urls}\n""" 'Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error' ) logger.info('All the checksums matched successfully' + for_verification_name ) class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" def __lowerCamelCase ( A__ , A__ ) -> Tuple: """simple docstring""" if expected_splits is None: logger.info('Unable to verify splits sizes.' ) return if len(set(A__ ) - set(A__ ) ) > 0: raise ExpectedMoreSplits(str(set(A__ ) - set(A__ ) ) ) if len(set(A__ ) - set(A__ ) ) > 0: raise UnexpectedSplits(str(set(A__ ) - set(A__ ) ) ) UpperCamelCase = [ {'expected': expected_splits[name], 'recorded': recorded_splits[name]} for name in expected_splits if expected_splits[name].num_examples != recorded_splits[name].num_examples ] if len(A__ ) > 0: raise NonMatchingSplitsSizesError(str(A__ ) ) logger.info('All the splits matched successfully.' ) def __lowerCamelCase ( A__ , A__ = True ) -> dict: """simple docstring""" if record_checksum: UpperCamelCase = shaaaa() with open(A__ , 'rb' ) as f: for chunk in iter(lambda: f.read(1 << 20 ) , B'' ): m.update(A__ ) UpperCamelCase = m.hexdigest() else: UpperCamelCase = None return {"num_bytes": os.path.getsize(A__ ), "checksum": checksum} def __lowerCamelCase ( A__ ) -> Optional[int]: """simple docstring""" if dataset_size and config.IN_MEMORY_MAX_SIZE: return dataset_size < config.IN_MEMORY_MAX_SIZE else: return False
28
'''simple docstring''' import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] ) @pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , '''IN_MEMORY_MAX_SIZE''' , snake_case__ ) A : Dict = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: A : Dict = dataset_size < in_memory_max_size else: A : Tuple = False A : int = is_small_dataset(snake_case__ ) assert result == expected
3
0
from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class lowerCamelCase : '''simple docstring''' def __init__( self , _UpperCamelCase , _UpperCamelCase=1_3 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=9_9 , _UpperCamelCase=3_2 , _UpperCamelCase=2 , _UpperCamelCase=4 , _UpperCamelCase=3_7 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=5_1_2 , _UpperCamelCase=1_6 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=4 , _UpperCamelCase=None , _UpperCamelCase=1_0_0_0 , ) -> Any: UpperCAmelCase_ : Union[str, Any] = parent UpperCAmelCase_ : Tuple = batch_size UpperCAmelCase_ : int = seq_length UpperCAmelCase_ : Dict = is_training UpperCAmelCase_ : str = use_input_mask UpperCAmelCase_ : List[Any] = use_token_type_ids UpperCAmelCase_ : Any = use_labels UpperCAmelCase_ : Optional[Any] = vocab_size UpperCAmelCase_ : Dict = hidden_size UpperCAmelCase_ : str = num_hidden_layers UpperCAmelCase_ : Optional[int] = num_attention_heads UpperCAmelCase_ : int = intermediate_size UpperCAmelCase_ : List[Any] = hidden_act UpperCAmelCase_ : Dict = hidden_dropout_prob UpperCAmelCase_ : str = attention_probs_dropout_prob UpperCAmelCase_ : Tuple = max_position_embeddings UpperCAmelCase_ : Optional[int] = type_vocab_size UpperCAmelCase_ : Optional[Any] = type_sequence_label_size UpperCAmelCase_ : List[str] = initializer_range UpperCAmelCase_ : str = num_labels UpperCAmelCase_ : Tuple = num_choices UpperCAmelCase_ : List[Any] = scope UpperCAmelCase_ : Union[str, Any] = range_bbox def __UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCAmelCase_ : Optional[int] = bbox[i, j, 3] UpperCAmelCase_ : Tuple = bbox[i, j, 1] UpperCAmelCase_ : Union[str, Any] = t if bbox[i, j, 2] < bbox[i, j, 0]: UpperCAmelCase_ : List[str] = bbox[i, j, 2] UpperCAmelCase_ : Tuple = bbox[i, j, 0] UpperCAmelCase_ : Tuple = t UpperCAmelCase_ : Optional[Any] = tf.convert_to_tensor(_UpperCamelCase ) UpperCAmelCase_ : str = None if self.use_input_mask: UpperCAmelCase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : Union[str, Any] = None if self.use_token_type_ids: UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ : str = None UpperCAmelCase_ : Any = None UpperCAmelCase_ : Optional[Any] = None if self.use_labels: UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ : Tuple = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]: UpperCAmelCase_ : Any = TFLayoutLMModel(config=_UpperCamelCase ) UpperCAmelCase_ : List[Any] = model(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = model(_UpperCamelCase , _UpperCamelCase , token_type_ids=_UpperCamelCase ) UpperCAmelCase_ : Tuple = model(_UpperCamelCase , _UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]: UpperCAmelCase_ : List[Any] = TFLayoutLMForMaskedLM(config=_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = model(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> int: UpperCAmelCase_ : List[Any] = self.num_labels UpperCAmelCase_ : Union[str, Any] = TFLayoutLMForSequenceClassification(config=_UpperCamelCase ) UpperCAmelCase_ : Dict = model(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]: UpperCAmelCase_ : List[Any] = self.num_labels UpperCAmelCase_ : int = TFLayoutLMForTokenClassification(config=_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = model(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any: UpperCAmelCase_ : List[str] = TFLayoutLMForQuestionAnswering(config=_UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = model(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCAmelCase ( self ) -> List[Any]: UpperCAmelCase_ : Dict = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Tuple = config_and_inputs UpperCAmelCase_ : List[Any] = { 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ): '''simple docstring''' _snake_case : int = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) _snake_case : List[Any] = ( { '''feature-extraction''': TFLayoutLMModel, '''fill-mask''': TFLayoutLMForMaskedLM, '''text-classification''': TFLayoutLMForSequenceClassification, '''token-classification''': TFLayoutLMForTokenClassification, '''zero-shot''': TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) _snake_case : Optional[int] = False _snake_case : Dict = True _snake_case : Dict = 1_0 def __UpperCAmelCase ( self ) -> List[str]: UpperCAmelCase_ : Tuple = TFLayoutLMModelTester(self ) UpperCAmelCase_ : Any = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=3_7 ) def __UpperCAmelCase ( self ) -> List[Any]: self.config_tester.run_common_tests() def __UpperCAmelCase ( self ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) def __UpperCAmelCase ( self ) -> str: UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Optional[Any]: UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Union[str, Any]: UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase ) @slow def __UpperCAmelCase ( self ) -> List[str]: for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : List[Any] = TFLayoutLMModel.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) @unittest.skip('Onnx compliancy broke with TF 2.10' ) def __UpperCAmelCase ( self ) -> Tuple: pass def lowercase__ ( ): '''simple docstring''' UpperCAmelCase_ : Dict = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]] ) # noqa: E231 UpperCAmelCase_ : Any = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 UpperCAmelCase_ : str = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]] ) # noqa: E231 UpperCAmelCase_ : Any = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. at the token level) UpperCAmelCase_ : int = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class lowerCamelCase (unittest.TestCase ): '''simple docstring''' @slow def __UpperCAmelCase ( self ) -> Tuple: UpperCAmelCase_ : List[Any] = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_layoutlm_batch_inputs() # forward pass UpperCAmelCase_ : Union[str, Any] = model(input_ids=_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase ) # test the sequence output on [0, :3, :3] UpperCAmelCase_ : Any = tf.convert_to_tensor( [[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _UpperCamelCase , atol=1E-3 ) ) # test the pooled output on [1, :3] UpperCAmelCase_ : Tuple = tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _UpperCamelCase , atol=1E-3 ) ) @slow def __UpperCAmelCase ( self ) -> str: # initialize model with randomly initialized sequence classification head UpperCAmelCase_ : Union[str, Any] = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = prepare_layoutlm_batch_inputs() # forward pass UpperCAmelCase_ : str = model( input_ids=_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar UpperCAmelCase_ : List[str] = outputs.loss UpperCAmelCase_ : Tuple = (2,) self.assertEqual(loss.shape , _UpperCamelCase ) # test the shape of the logits UpperCAmelCase_ : Dict = outputs.logits UpperCAmelCase_ : List[str] = (2, 2) self.assertEqual(logits.shape , _UpperCamelCase ) @slow def __UpperCAmelCase ( self ) -> List[Any]: # initialize model with randomly initialized token classification head UpperCAmelCase_ : int = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=1_3 ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = prepare_layoutlm_batch_inputs() # forward pass UpperCAmelCase_ : str = model( input_ids=_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase ) # test the shape of the logits UpperCAmelCase_ : int = outputs.logits UpperCAmelCase_ : str = tf.convert_to_tensor((2, 2_5, 1_3) ) self.assertEqual(logits.shape , _UpperCamelCase ) @slow def __UpperCAmelCase ( self ) -> List[str]: # initialize model with randomly initialized token classification head UpperCAmelCase_ : Tuple = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = prepare_layoutlm_batch_inputs() # forward pass UpperCAmelCase_ : Tuple = model(input_ids=_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase ) # test the shape of the logits UpperCAmelCase_ : Tuple = tf.convert_to_tensor((2, 2_5) ) self.assertEqual(outputs.start_logits.shape , _UpperCamelCase ) self.assertEqual(outputs.end_logits.shape , _UpperCamelCase )
29
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class A ( __snake_case ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
3
0
def a ( snake_case__: int ): '''simple docstring''' if num <= 0: raise ValueError('''Input must be a positive integer''' ) lowercase_ = [True] * (num + 1) lowercase_ = 2 while p * p <= num: if primes[p]: for i in range(p * p , num + 1 , snake_case__ ): lowercase_ = False p += 1 return [prime for prime in range(2 , num + 1 ) if primes[prime]] if __name__ == "__main__": import doctest doctest.testmod() __a = int(input('Enter a positive integer: ').strip()) print(prime_sieve_eratosthenes(user_num))
30
'''simple docstring''' from scipy.stats import pearsonr import datasets lowercase : Optional[int] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n' lowercase : Optional[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n' lowercase : str = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: """simple docstring""" if return_pvalue: A : Union[str, Any] = pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )}
3
0
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer __SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : str = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} __SCREAMING_SNAKE_CASE : Dict = { """vocab_file""": { """distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""", """distilbert-base-uncased-distilled-squad""": ( """https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt""" ), """distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""", """distilbert-base-cased-distilled-squad""": ( """https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt""" ), """distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""", """distilbert-base-multilingual-cased""": ( """https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""", """distilbert-base-uncased-distilled-squad""": ( """https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json""" ), """distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""", """distilbert-base-cased-distilled-squad""": ( """https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json""" ), """distilbert-base-german-cased""": ( """https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json""" ), """distilbert-base-multilingual-cased""": ( """https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json""" ), }, } __SCREAMING_SNAKE_CASE : Optional[Any] = { """distilbert-base-uncased""": 512, """distilbert-base-uncased-distilled-squad""": 512, """distilbert-base-cased""": 512, """distilbert-base-cased-distilled-squad""": 512, """distilbert-base-german-cased""": 512, """distilbert-base-multilingual-cased""": 512, } __SCREAMING_SNAKE_CASE : List[Any] = { """distilbert-base-uncased""": {"""do_lower_case""": True}, """distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True}, """distilbert-base-cased""": {"""do_lower_case""": False}, """distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False}, """distilbert-base-german-cased""": {"""do_lower_case""": False}, """distilbert-base-multilingual-cased""": {"""do_lower_case""": False}, } class lowerCamelCase_ (snake_case__ ): '''simple docstring''' __UpperCamelCase: Union[str, Any] = VOCAB_FILES_NAMES __UpperCamelCase: str = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase: Any = PRETRAINED_INIT_CONFIGURATION __UpperCamelCase: str = ["input_ids", "attention_mask"] __UpperCamelCase: List[str] = DistilBertTokenizer def __init__( self : str , A : int=None , A : Tuple=None , A : Tuple=True , A : Dict="[UNK]" , A : List[Any]="[SEP]" , A : Optional[Any]="[PAD]" , A : Dict="[CLS]" , A : Tuple="[MASK]" , A : str=True , A : Dict=None , **A : List[Any] , ): super().__init__( A , tokenizer_file=A , do_lower_case=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , tokenize_chinese_chars=A , strip_accents=A , **A , ) _UpperCAmelCase : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , A ) != do_lower_case or normalizer_state.get("strip_accents" , A ) != strip_accents or normalizer_state.get("handle_chinese_chars" , A ) != tokenize_chinese_chars ): _UpperCAmelCase : Dict = getattr(A , normalizer_state.pop("type" ) ) _UpperCAmelCase : int = do_lower_case _UpperCAmelCase : Optional[int] = strip_accents _UpperCAmelCase : str = tokenize_chinese_chars _UpperCAmelCase : List[Any] = normalizer_class(**A ) _UpperCAmelCase : Dict = do_lower_case def _A ( self : List[Any] , A : Tuple , A : Any=None ): _UpperCAmelCase : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _A ( self : int , A : List[int] , A : Optional[List[int]] = None ): _UpperCAmelCase : Any = [self.sep_token_id] _UpperCAmelCase : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _A ( self : Dict , A : str , A : Optional[str] = None ): _UpperCAmelCase : Any = self._tokenizer.model.save(A , name=A ) return tuple(A )
31
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) lowercase : Dict = { 'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'], 'processing_speech_to_text': ['Speech2TextProcessor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[Any] = ['Speech2TextTokenizer'] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[str] = ['Speech2TextFeatureExtractor'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Dict = [ 'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFSpeech2TextForConditionalGeneration', 'TFSpeech2TextModel', 'TFSpeech2TextPreTrainedModel', ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Any = [ 'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Speech2TextForConditionalGeneration', 'Speech2TextModel', 'Speech2TextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys lowercase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
3
0
import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : int , __A : Optional[int] ) -> Tuple: """simple docstring""" a_ : Tuple = os.path.abspath(__A ) logger.info(F"""Converting TensorFlow checkpoint from {tf_path}""" ) # Load weights from TF model a_ : List[str] = tf.train.list_variables(__A ) a_ : Dict = [] a_ : str = [] a_ : List[Any] = [] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") a_ : Union[str, Any] = full_name.split('/' ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(F"""Skipping non-model layer {full_name}""" ) continue if "optimizer" in full_name: logger.info(F"""Skipping optimization layer {full_name}""" ) continue if name[0] == "model": # ignore initial 'model' a_ : Dict = name[1:] # figure out how many levels deep the name is a_ : str = 0 for _name in name: if _name.startswith('layer_with_weights' ): depth += 1 else: break layer_depth.append(__A ) # read data a_ : Any = tf.train.load_variable(__A , __A ) names.append('/'.join(__A ) ) arrays.append(__A ) logger.info(F"""Read a total of {len(__A ):,} layers""" ) # Sanity check if len(set(__A ) ) != 1: raise ValueError(F"""Found layer names with different depths (layer depth {list(set(__A ) )})""" ) a_ : Union[str, Any] = list(set(__A ) )[0] if layer_depth != 1: raise ValueError( 'The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP' ' heads.' ) # convert layers logger.info('Converting weights...' ) for full_name, array in zip(__A , __A ): a_ : List[str] = full_name.split('/' ) a_ : List[str] = model a_ : int = [] for i, m_name in enumerate(__A ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith('layer_with_weights' ): a_ : Optional[Any] = int(m_name.split('-' )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(['embeddings', 'LayerNorm'] ) a_ : List[str] = getattr(__A , 'embeddings' ) a_ : Any = getattr(__A , 'LayerNorm' ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(['encoder', 'layer', str(layer_num - 4 )] ) a_ : Optional[int] = getattr(__A , 'encoder' ) a_ : Union[str, Any] = getattr(__A , 'layer' ) a_ : List[str] = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(['pooler', 'dense'] ) a_ : str = getattr(__A , 'pooler' ) a_ : List[Any] = getattr(__A , 'dense' ) elif m_name == "embeddings": trace.append('embeddings' ) a_ : Optional[int] = getattr(__A , 'embeddings' ) if layer_num == 0: trace.append('word_embeddings' ) a_ : int = getattr(__A , 'word_embeddings' ) elif layer_num == 1: trace.append('position_embeddings' ) a_ : List[str] = getattr(__A , 'position_embeddings' ) elif layer_num == 2: trace.append('token_type_embeddings' ) a_ : str = getattr(__A , 'token_type_embeddings' ) else: raise ValueError(F"""Unknown embedding layer with name {full_name}""" ) trace.append('weight' ) a_ : Any = getattr(__A , 'weight' ) elif m_name == "_attention_layer": # self-attention layer trace.extend(['attention', 'self'] ) a_ : Dict = getattr(__A , 'attention' ) a_ : Optional[int] = getattr(__A , 'self' ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(['attention', 'output', 'LayerNorm'] ) a_ : Optional[int] = getattr(__A , 'attention' ) a_ : Dict = getattr(__A , 'output' ) a_ : Any = getattr(__A , 'LayerNorm' ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(['attention', 'output', 'dense'] ) a_ : Optional[int] = getattr(__A , 'attention' ) a_ : int = getattr(__A , 'output' ) a_ : List[Any] = getattr(__A , 'dense' ) elif m_name == "_output_dense": # output dense trace.extend(['output', 'dense'] ) a_ : Any = getattr(__A , 'output' ) a_ : Any = getattr(__A , 'dense' ) elif m_name == "_output_layer_norm": # output dense trace.extend(['output', 'LayerNorm'] ) a_ : Tuple = getattr(__A , 'output' ) a_ : Any = getattr(__A , 'LayerNorm' ) elif m_name == "_key_dense": # attention key trace.append('key' ) a_ : Optional[int] = getattr(__A , 'key' ) elif m_name == "_query_dense": # attention query trace.append('query' ) a_ : Tuple = getattr(__A , 'query' ) elif m_name == "_value_dense": # attention value trace.append('value' ) a_ : Any = getattr(__A , 'value' ) elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(['intermediate', 'dense'] ) a_ : Any = getattr(__A , 'intermediate' ) a_ : Optional[int] = getattr(__A , 'dense' ) elif m_name == "_output_layer_norm": # output layer norm trace.append('output' ) a_ : Optional[int] = getattr(__A , 'output' ) # weights & biases elif m_name in ["bias", "beta"]: trace.append('bias' ) a_ : Any = getattr(__A , 'bias' ) elif m_name in ["kernel", "gamma"]: trace.append('weight' ) a_ : str = getattr(__A , 'weight' ) else: logger.warning(F"""Ignored {m_name}""" ) # for certain layers reshape is necessary a_ : Union[str, Any] = '.'.join(__A ) if re.match(R'(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)' , __A ) or re.match( R'(\S+)\.attention\.output\.dense\.weight' , __A ): a_ : Dict = array.reshape(pointer.data.shape ) if "kernel" in full_name: a_ : Optional[Any] = array.transpose() if pointer.shape == array.shape: a_ : Tuple = torch.from_numpy(__A ) else: raise ValueError( F"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:""" F""" {array.shape}""" ) logger.info(F"""Successfully set variable {full_name} to PyTorch layer {trace}""" ) return model def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] , __A : Optional[int] , __A : List[str] ) -> List[Any]: """simple docstring""" logger.info(F"""Loading model based on config from {config_path}...""" ) a_ : str = BertConfig.from_json_file(__A ) a_ : Optional[Any] = BertModel(__A ) # Load weights from checkpoint logger.info(F"""Loading weights from checkpoint {tf_checkpoint_path}...""" ) load_tfa_weights_in_bert(__A , __A , __A ) # Save pytorch-model logger.info(F"""Saving PyTorch model to {pytorch_dump_path}...""" ) torch.save(model.state_dict() , __A ) if __name__ == "__main__": UpperCAmelCase_ : Any = argparse.ArgumentParser() parser.add_argument( '--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.' ) parser.add_argument( '--bert_config_file', type=str, required=True, help='The config json file corresponding to the BERT model. This specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', type=str, required=True, help='Path to the output PyTorch model (must include filename).', ) UpperCAmelCase_ : Tuple = parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
32
'''simple docstring''' import os import sys import unittest lowercase : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) lowercase : Any = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py') lowercase : Optional[int] = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py') class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Any = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : List[Any] = {'''BertModelTest''': '''BertModelTester'''} A : int = { '''BlipModelTest''': '''BlipModelTester''', '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''', '''BlipTextModelTest''': '''BlipTextModelTester''', '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''', '''BlipVQAModelTest''': '''BlipVQAModelTester''', '''BlipVisionModelTest''': '''BlipVisionModelTester''', } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : List[str] = { '''BertForMaskedLM''': ['''BertModelTest'''], '''BertForMultipleChoice''': ['''BertModelTest'''], '''BertForNextSentencePrediction''': ['''BertModelTest'''], '''BertForPreTraining''': ['''BertModelTest'''], '''BertForQuestionAnswering''': ['''BertModelTest'''], '''BertForSequenceClassification''': ['''BertModelTest'''], '''BertForTokenClassification''': ['''BertModelTest'''], '''BertLMHeadModel''': ['''BertModelTest'''], '''BertModel''': ['''BertModelTest'''], } A : Union[str, Any] = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''], '''BlipModel''': ['''BlipModelTest'''], '''BlipTextModel''': ['''BlipTextModelTest'''], '''BlipVisionModel''': ['''BlipVisionModelTest'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : int = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Dict = { '''BertForMaskedLM''': ['''BertModelTester'''], '''BertForMultipleChoice''': ['''BertModelTester'''], '''BertForNextSentencePrediction''': ['''BertModelTester'''], '''BertForPreTraining''': ['''BertModelTester'''], '''BertForQuestionAnswering''': ['''BertModelTester'''], '''BertForSequenceClassification''': ['''BertModelTester'''], '''BertForTokenClassification''': ['''BertModelTester'''], '''BertLMHeadModel''': ['''BertModelTester'''], '''BertModel''': ['''BertModelTester'''], } A : str = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''], '''BlipModel''': ['''BlipModelTester'''], '''BlipTextModel''': ['''BlipTextModelTester'''], '''BlipVisionModel''': ['''BlipVisionModelTester'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
3
0
"""simple docstring""" import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() __A : List[Any] = 2 class _UpperCAmelCase : def __init__( self : Tuple , *, # begin keyword-only arguments A : Tuple="<s>" , A : List[str]="<pad>" , A : Optional[Any]="</s>" , A : str="<unk>" , A : int=None , ) -> Union[str, Any]: lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[str] = bos, unk, pad, eos lowercase_ : Tuple = [] lowercase_ : Union[str, Any] = [] lowercase_ : Dict = {} lowercase_ : List[Any] = self.add_symbol(A ) lowercase_ : Optional[Any] = self.add_symbol(A ) lowercase_ : Optional[Any] = self.add_symbol(A ) lowercase_ : str = self.add_symbol(A ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(A ) lowercase_ : int = len(self.symbols ) def __eq__( self : str , A : Tuple ) -> Any: return self.indices == other.indices def __getitem__( self : int , A : Tuple ) -> Any: if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self : Any ) -> Union[str, Any]: return len(self.symbols ) def __contains__( self : Optional[Any] , A : Optional[int] ) -> Dict: return sym in self.indices @classmethod def A ( cls : Optional[int] , A : Dict ) -> Any: lowercase_ : Any = cls() d.add_from_file(A ) return d def A ( self : List[Any] , A : int , A : List[Any]=1 , A : List[str]=False ) -> Dict: if word in self.indices and not overwrite: lowercase_ : Optional[int] = self.indices[word] lowercase_ : Tuple = self.count[idx] + n return idx else: lowercase_ : Dict = len(self.symbols ) lowercase_ : int = idx self.symbols.append(A ) self.count.append(A ) return idx def A ( self : int , A : Tuple ) -> List[str]: return 0 def A ( self : str , A : str ) -> Tuple: if isinstance(A , A ): try: with open(A , '''r''' , encoding='''utf-8''' ) as fd: self.add_from_file(A ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception('''Incorrect encoding detected in {}, please rebuild the dataset'''.format(A ) ) return lowercase_ : Any = f.readlines() lowercase_ : int = self._load_meta(A ) for line in lines[indices_start_line:]: try: lowercase_ , lowercase_ : Any = line.rstrip().rsplit(''' ''' , 1 ) if field == "#fairseq:overwrite": lowercase_ : str = True lowercase_ , lowercase_ : Union[str, Any] = line.rsplit(''' ''' , 1 ) else: lowercase_ : Tuple = False lowercase_ : Optional[int] = int(A ) lowercase_ : Optional[int] = line if word in self and not overwrite: raise RuntimeError( '''Duplicate word found when loading Dictionary: \'{}\'. ''' '''Duplicate words can overwrite earlier ones by adding the ''' '''#fairseq:overwrite flag at the end of the corresponding row ''' '''in the dictionary file. If using the Camembert model, please ''' '''download an updated copy of the model file.'''.format(A ) ) self.add_symbol(A , n=A , overwrite=A ) except ValueError: raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt> [flags]\'''' ) def lowercase ( __snake_case : Dict ): # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up, # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7} lowercase_ : Dict = dict((re.sub(r'''@@$''' , '''''' , __snake_case ), v) if k.endswith('''@@''' ) else (re.sub(r'''$''' , '''</w>''' , __snake_case ), v) for k, v in d.items() ) lowercase_ : int = '''<s> <pad> </s> <unk>'''.split() # restore the special tokens for k in keep_keys: del da[F'''{k}</w>'''] lowercase_ : Union[str, Any] = d[k] # restore return da def lowercase ( __snake_case : Tuple , __snake_case : Any ): # prep if not os.path.exists(__snake_case ): raise ValueError(F'''path {biogpt_checkpoint_path} does not exist!''' ) os.makedirs(__snake_case , exist_ok=__snake_case ) print(F'''Writing results to {pytorch_dump_folder_path}''' ) # handle various types of models lowercase_ : Optional[Any] = os.path.join(__snake_case , '''checkpoint.pt''' ) if not os.path.isfile(__snake_case ): raise ValueError(F'''path to the file {checkpoint_file} does not exist!''' ) lowercase_ : Union[str, Any] = torch.load(__snake_case , map_location='''cpu''' ) lowercase_ : int = chkpt['''cfg''']['''model'''] # dicts lowercase_ : int = os.path.join(__snake_case , '''dict.txt''' ) if not os.path.isfile(__snake_case ): raise ValueError(F'''path to the file {dict_file} does not exist!''' ) lowercase_ : str = Dictionary.load(__snake_case ) lowercase_ : List[str] = rewrite_dict_keys(src_dict.indices ) lowercase_ : Dict = len(__snake_case ) lowercase_ : int = os.path.join(__snake_case , VOCAB_FILES_NAMES['''vocab_file'''] ) print(F'''Generating {src_vocab_file} of {src_vocab_size} records''' ) with open(__snake_case , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(__snake_case , ensure_ascii=__snake_case , indent=__snake_case ) ) # merges_file (bpecodes) lowercase_ : Optional[int] = os.path.join(__snake_case , '''bpecodes''' ) if not os.path.isfile(__snake_case ): raise ValueError(F'''path to the file {bpecodes_file} does not exist!''' ) lowercase_ : List[Any] = os.path.join(__snake_case , VOCAB_FILES_NAMES['''merges_file'''] ) shutil.copyfile(__snake_case , __snake_case ) # model config lowercase_ : Union[str, Any] = os.path.join(__snake_case , '''config.json''' ) lowercase_ : Dict = { '''activation_dropout''': args['''activation_dropout'''], '''architectures''': ['''BioGptForCausalLM'''], '''attention_probs_dropout_prob''': args['''attention_dropout'''], '''bos_token_id''': 0, '''eos_token_id''': 2, '''hidden_act''': args['''activation_fn'''], '''hidden_dropout_prob''': args['''dropout'''], '''hidden_size''': args['''decoder_embed_dim'''], '''initializer_range''': 0.02, '''intermediate_size''': args['''decoder_ffn_embed_dim'''], '''layer_norm_eps''': 1e-12, '''layerdrop''': args['''decoder_layerdrop'''], '''max_position_embeddings''': args['''max_target_positions'''], '''model_type''': '''biogpt''', '''num_attention_heads''': args['''decoder_attention_heads'''], '''num_hidden_layers''': args['''decoder_layers'''], '''pad_token_id''': 1, '''scale_embedding''': not args['''no_scale_embedding'''], '''tie_word_embeddings''': args['''share_decoder_input_output_embed'''], '''vocab_size''': src_vocab_size, } # good hparam defaults to start with print(F'''Generating {biogpt_model_config_file}''' ) with open(__snake_case , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(__snake_case , ensure_ascii=__snake_case , indent=__snake_case ) ) # tokenizer config lowercase_ : Optional[int] = os.path.join(__snake_case , __snake_case ) lowercase_ : str = { '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''model_max_length''': 1_0_2_4, '''pad_token''': '''<pad>''', '''special_tokens_map_file''': None, '''tokenizer_class''': '''BioGptTokenizer''', '''unk_token''': '''<unk>''', } print(F'''Generating {biogpt_tokenizer_config_file}''' ) with open(__snake_case , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(__snake_case , ensure_ascii=__snake_case , indent=__snake_case ) ) # model lowercase_ : Tuple = chkpt['''model'''] # remove unneeded keys lowercase_ : Dict = [ '''decoder.version''', ] for k in ignore_keys: model_state_dict.pop(__snake_case , __snake_case ) lowercase_ : List[Any] = list(model_state_dict.keys() ) for layer_name in layer_names: if layer_name.endswith('''output_projection.weight''' ): lowercase_ : Optional[int] = model_state_dict.pop(__snake_case ) else: lowercase_ : str = model_state_dict.pop(__snake_case ) lowercase_ : int = BioGptConfig.from_pretrained(__snake_case ) lowercase_ : int = BioGptForCausalLM(__snake_case ) # check that it loads ok model_new.load_state_dict(__snake_case ) # save lowercase_ : Optional[int] = os.path.join(__snake_case , __snake_case ) print(F'''Generating {pytorch_weights_dump_path}''' ) torch.save(__snake_case , __snake_case ) print('''Conversion is done!''' ) if __name__ == "__main__": __A : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--biogpt_checkpoint_path''', default=None, type=str, required=True, help=( '''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,''' ''' bpecodes, etc.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) __A : Dict = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
33
'''simple docstring''' from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class A ( __snake_case ): __magic_name__ = DistilBertTokenizer __magic_name__ = DistilBertTokenizerFast __magic_name__ = True @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : List[Any] = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' ) A : Dict = tokenizer.encode('''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE ) A : List[str] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE ) A : Optional[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE ) A : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
3
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A ={ 'configuration_time_series_transformer': [ 'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimeSeriesTransformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A =[ 'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TimeSeriesTransformerForPrediction', 'TimeSeriesTransformerModel', 'TimeSeriesTransformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys A =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
34
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase : Optional[int] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''input_features''', '''attention_mask'''] def __init__( self , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=16000 , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" super().__init__(feature_size=SCREAMING_SNAKE_CASE , sampling_rate=SCREAMING_SNAKE_CASE , padding_value=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = num_mel_bins A : Tuple = do_ceptral_normalize A : Dict = normalize_means A : List[Any] = normalize_vars A : List[str] = True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : List[Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers A : Any = torch.from_numpy(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) A : Any = ta_kaldi.fbank(SCREAMING_SNAKE_CASE , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 0.0 , ) -> np.ndarray: """simple docstring""" if normalize_means: A : Dict = x[:input_length].mean(axis=0 ) A : Optional[Any] = np.subtract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if normalize_vars: A : str = x[:input_length].std(axis=0 ) A : int = np.divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if input_length < x.shape[0]: A : List[str] = padding_value # make sure array is in float32 A : Tuple = x.astype(np.floataa ) return x def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[np.ndarray]: """simple docstring""" A : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' F' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) A : List[Any] = isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}' ) A : Tuple = is_batched_numpy or ( isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ): A : Union[str, Any] = np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A : Any = [raw_speech] # extract fbank features A : List[str] = [self._extract_fbank_features(SCREAMING_SNAKE_CASE ) for waveform in raw_speech] # convert into correct format for padding A : str = BatchFeature({'''input_features''': features} ) A : Union[str, Any] = self.pad( SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) # make sure list is in array format A : List[str] = padded_inputs.get('''input_features''' ) if isinstance(input_features[0] , SCREAMING_SNAKE_CASE ): A : str = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features] A : Union[str, Any] = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: A : Dict = ( np.array(SCREAMING_SNAKE_CASE , dtype=np.intaa ) if self._get_padding_strategies(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE ) is not PaddingStrategy.DO_NOT_PAD else None ) A : List[Any] = self.normalize( padded_inputs['''input_features'''] , attention_mask=SCREAMING_SNAKE_CASE ) if return_tensors is not None: A : int = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE ) return padded_inputs
3
0
'''simple docstring''' from pathlib import Path import cva import numpy as np from matplotlib import pyplot as plt def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> np.ndarray: snake_case__ : Any = cva.getAffineTransform(_lowerCAmelCase , _lowerCAmelCase ) return cva.warpAffine(_lowerCAmelCase , _lowerCAmelCase , (rows, cols) ) if __name__ == "__main__": # read original image __a = cva.imread( str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg") ) # turn image in gray scale value __a = cva.cvtColor(image, cva.COLOR_BGR2GRAY) # get image shape __a , __a = gray_img.shape # set different points to rotate image __a = np.array([[50, 50], [200, 50], [50, 200]], np.floataa) __a = np.array([[10, 100], [200, 50], [100, 250]], np.floataa) __a = np.array([[50, 50], [150, 50], [120, 200]], np.floataa) __a = np.array([[10, 100], [80, 50], [180, 250]], np.floataa) # add all rotated images in a list __a = [ gray_img, get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), ] # plot different image rotations __a = plt.figure(1) __a = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"] for i, image in enumerate(images): plt.subplot(2, 2, i + 1), plt.imshow(image, "gray") plt.title(titles[i]) plt.axis("off") plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95) plt.show()
35
'''simple docstring''' import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 lowercase : str = get_tests_dir('fixtures/dummy_feature_extractor_config.json') lowercase : str = get_tests_dir('fixtures/vocab.json') lowercase : int = get_tests_dir('fixtures') class A ( unittest.TestCase ): __magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = 0 def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : List[Any] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : Union[str, Any] = WavaVecaConfig() A : List[str] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) # save in new folder model_config.save_pretrained(SCREAMING_SNAKE_CASE ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) ) A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : Dict = WavaVecaFeatureExtractor() A : List[str] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(SCREAMING_SNAKE_CASE ) # drop `processor_class` in tokenizer with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f: A : Dict = json.load(SCREAMING_SNAKE_CASE ) config_dict.pop('''processor_class''' ) with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE ) ) A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : List[Any] = WavaVecaFeatureExtractor() A : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(SCREAMING_SNAKE_CASE ) # drop `processor_class` in feature extractor with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f: A : str = json.load(SCREAMING_SNAKE_CASE ) config_dict.pop('''processor_class''' ) with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE ) ) A : str = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : str = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' ) model_config.save_pretrained(SCREAMING_SNAKE_CASE ) # copy relevant files copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) ) # create emtpy sample processor with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write('''{}''' ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" with self.assertRaises(SCREAMING_SNAKE_CASE ): A : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(SCREAMING_SNAKE_CASE ): A : Union[str, Any] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) A : List[str] = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) A : Tuple = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version A : List[str] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE ) A : int = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(SCREAMING_SNAKE_CASE ): AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Now that the config is registered, it can be used as any other config with the auto-API A : List[Any] = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: A : Tuple = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : Optional[int] = CustomTokenizer(SCREAMING_SNAKE_CASE ) A : Any = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" class A ( __snake_case ): __magic_name__ = False class A ( __snake_case ): __magic_name__ = False class A ( __snake_case ): __magic_name__ = '''AutoFeatureExtractor''' __magic_name__ = '''AutoTokenizer''' __magic_name__ = False try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # If remote code is not set, the default is to use local classes. A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. A : Optional[int] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. A : Tuple = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : int = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Optional[int] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' ) self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' ) @is_staging_test class A ( unittest.TestCase ): __magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def __lowerCAmelCase ( cls ) -> Dict: """simple docstring""" A : Optional[int] = TOKEN HfFolder.save_token(SCREAMING_SNAKE_CASE ) @classmethod def __lowerCAmelCase ( cls ) -> Any: """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' ) except HTTPError: pass def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Union[str, Any] = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(SCREAMING_SNAKE_CASE , '''test-processor''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token ) A : int = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Tuple = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(SCREAMING_SNAKE_CASE , '''test-processor-org''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token , organization='''valid_org''' , ) A : int = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() A : Any = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: A : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : str = CustomTokenizer(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(F'{USER}/test-dynamic-processor' , token=self._token ) A : List[str] = Repository(SCREAMING_SNAKE_CASE , clone_from=F'{USER}/test-dynamic-processor' , token=self._token ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { '''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''', '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) ) as f: A : Dict = json.load(SCREAMING_SNAKE_CASE ) self.assertDictEqual( tokenizer_config['''auto_map'''] , { '''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None], '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_feature_extraction.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_tokenization.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_processing.py''' ) ) ) repo.push_to_hub() A : Optional[int] = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' , trust_remote_code=SCREAMING_SNAKE_CASE ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
3
0
import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def A ( _lowerCamelCase , _lowerCamelCase=7 ): '''simple docstring''' _lowerCAmelCase : Any = None if token is not None: _lowerCAmelCase : Tuple = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"} # The id of a workflow (not of a workflow run) _lowerCAmelCase : Any = "636036" _lowerCAmelCase : List[Any] = F"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs" # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += F"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}" _lowerCAmelCase : Optional[int] = requests.get(_lowerCamelCase , headers=_lowerCamelCase ).json() return result["workflow_runs"] def A ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : str = get_daily_ci_runs(_lowerCamelCase ) _lowerCAmelCase : List[Any] = None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": _lowerCAmelCase : str = workflow_run["id"] break return workflow_run_id def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : Any = get_last_daily_ci_runs(_lowerCamelCase ) if workflow_run_id is not None: _lowerCAmelCase : int = get_artifacts_links(worflow_run_id=_lowerCamelCase , token=_lowerCamelCase ) for artifact_name in artifact_names: if artifact_name in artifacts_links: _lowerCAmelCase : Optional[int] = artifacts_links[artifact_name] download_artifact( artifact_name=_lowerCamelCase , artifact_url=_lowerCamelCase , output_dir=_lowerCamelCase , token=_lowerCamelCase ) def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' get_last_daily_ci_artifacts(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) _lowerCAmelCase : str = {} for artifact_name in artifact_names: _lowerCAmelCase : Optional[int] = os.path.join(_lowerCamelCase , F"{artifact_name}.zip" ) if os.path.isfile(_lowerCamelCase ): _lowerCAmelCase : Dict = {} with zipfile.ZipFile(_lowerCamelCase ) as z: for filename in z.namelist(): if not os.path.isdir(_lowerCamelCase ): # read the file with z.open(_lowerCamelCase ) as f: _lowerCAmelCase : str = f.read().decode("UTF-8" ) return results
36
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: lowercase : Optional[Any] = None lowercase : Tuple = logging.get_logger(__name__) lowercase : Dict = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'} lowercase : Tuple = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, 'tokenizer_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json', }, } lowercase : List[str] = { 'google/rembert': 2_56, } lowercase : Dict = '▁' class A ( __snake_case ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = RemBertTokenizer def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[MASK]" , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" A : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token super().__init__( SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) A : List[Any] = do_lower_case A : str = remove_space A : int = keep_accents A : Union[str, Any] = vocab_file A : List[Any] = False if not self.vocab_file else True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : List[Any] = [self.sep_token_id] A : Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : Tuple = [self.sep_token_id] A : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error('''Vocabulary path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE ) ) return A : Any = os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
3
0
'''simple docstring''' from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging _lowerCAmelCase = logging.get_logger(__name__) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" try: with open(UpperCamelCase , """rb""" ) as flax_state_f: lowerCAmelCase__ : Union[str, Any] = from_bytes(UpperCamelCase , flax_state_f.read() ) except UnpicklingError as e: try: with open(UpperCamelCase ) as f: if f.read().startswith("""version""" ): raise OSError( """You seem to have cloned a repository without having git-lfs installed. Please""" """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the""" """ folder you cloned.""" ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(f"""Unable to convert {model_file} to Flax deserializable object. """ ) return load_flax_weights_in_pytorch_model(UpperCamelCase , UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ): """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( """Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see""" """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation""" """ instructions.""" ) raise # check if we have bf16 weights lowerCAmelCase__ : str = flatten_dict(jax.tree_util.tree_map(lambda UpperCamelCase : x.dtype == jnp.bfloataa , UpperCamelCase ) ).values() if any(UpperCamelCase ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """ """before loading those in PyTorch model.""" ) lowerCAmelCase__ : Dict = jax.tree_util.tree_map( lambda UpperCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , UpperCamelCase ) lowerCAmelCase__ : Any = """""" lowerCAmelCase__ : Any = flatten_dict(UpperCamelCase , sep=""".""" ) lowerCAmelCase__ : Optional[int] = pt_model.state_dict() # keep track of unexpected & missing keys lowerCAmelCase__ : Optional[Any] = [] lowerCAmelCase__ : int = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): lowerCAmelCase__ : Union[str, Any] = flax_key_tuple.split(""".""" ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: lowerCAmelCase__ : Optional[int] = flax_key_tuple_array[:-1] + ["""weight"""] lowerCAmelCase__ : Any = jnp.transpose(UpperCamelCase , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": lowerCAmelCase__ : str = flax_key_tuple_array[:-1] + ["""weight"""] lowerCAmelCase__ : Any = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": lowerCAmelCase__ : int = flax_key_tuple_array[:-1] + ["""weight"""] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(UpperCamelCase ): lowerCAmelCase__ : List[str] = ( flax_key_tuple_string.replace("""_0""" , """.0""" ) .replace("""_1""" , """.1""" ) .replace("""_2""" , """.2""" ) .replace("""_3""" , """.3""" ) .replace("""_4""" , """.4""" ) .replace("""_5""" , """.5""" ) .replace("""_6""" , """.6""" ) .replace("""_7""" , """.7""" ) .replace("""_8""" , """.8""" ) .replace("""_9""" , """.9""" ) ) lowerCAmelCase__ : Union[str, Any] = """.""".join(UpperCamelCase ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """ f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" ) else: # add weight to pytorch dict lowerCAmelCase__ : int = np.asarray(UpperCamelCase ) if not isinstance(UpperCamelCase , np.ndarray ) else flax_tensor lowerCAmelCase__ : int = torch.from_numpy(UpperCamelCase ) # remove from missing keys missing_keys.remove(UpperCamelCase ) else: # weight is not expected by PyTorch model unexpected_keys.append(UpperCamelCase ) pt_model.load_state_dict(UpperCamelCase ) # re-transform missing_keys to list lowerCAmelCase__ : Optional[int] = list(UpperCamelCase ) if len(UpperCamelCase ) > 0: logger.warning( """Some weights of the Flax model were not used when initializing the PyTorch model""" f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing""" f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture""" """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This""" f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect""" """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a""" """ FlaxBertForSequenceClassification model).""" ) if len(UpperCamelCase ) > 0: logger.warning( f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly""" f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to""" """ use it for predictions and inference.""" ) return pt_model
37
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowercase : Optional[Any] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''pixel_values'''] def __init__( self , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 1 / 255 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) A : str = size if size is not None else {'''shortest_edge''': 384} A : Tuple = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : str = do_resize A : List[Any] = size # Default value set here for backwards compatibility where the value in config is None A : List[Any] = crop_pct if crop_pct is not None else 224 / 256 A : Optional[int] = resample A : Union[str, Any] = do_rescale A : List[str] = rescale_factor A : Union[str, Any] = do_normalize A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : str = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) if "shortest_edge" not in size: raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' ) A : Any = size['''shortest_edge'''] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct A : Dict = int(shortest_edge / crop_pct ) A : str = get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : int = resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) else: # warping (no cropping) when evaluated at 384 or larger return resize( SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE , ) -> PIL.Image.Image: """simple docstring""" A : int = do_resize if do_resize is not None else self.do_resize A : Tuple = crop_pct if crop_pct is not None else self.crop_pct A : Optional[Any] = resample if resample is not None else self.resample A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale A : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor A : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean A : List[str] = image_std if image_std is not None else self.image_std A : Union[str, Any] = size if size is not None else self.size A : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : Any = make_list_of_images(SCREAMING_SNAKE_CASE ) if not valid_images(SCREAMING_SNAKE_CASE ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError('''crop_pct must be specified if size < 384.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. A : Optional[int] = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images] if do_resize: A : Any = [self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , crop_pct=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: A : str = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images] if do_normalize: A : Dict = [self.normalize(image=SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE ) for image in images] A : Any = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images] A : Optional[int] = {'''pixel_values''': images} return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
3
0
import contextlib from multiprocessing import Pool, RLock from tqdm.auto import tqdm from ..utils import experimental, logging UpperCAmelCase_ : Dict = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE : snake_case__ : Dict = None @experimental def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : Dict ) -> Optional[int]: """simple docstring""" if ParallelBackendConfig.backend_name is None: return _map_with_multiprocessing_pool( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) return _map_with_joblib(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Dict ) -> Optional[int]: """simple docstring""" UpperCamelCase :Optional[Any] = num_proc if num_proc <= len(__magic_name__ ) else len(__magic_name__ ) UpperCamelCase :int = [] # We organize the splits ourselve (contiguous splits) for index in range(__magic_name__ ): UpperCamelCase :str = len(__magic_name__ ) // num_proc UpperCamelCase :List[Any] = len(__magic_name__ ) % num_proc UpperCamelCase :Union[str, Any] = div * index + min(__magic_name__ , __magic_name__ ) UpperCamelCase :Optional[Any] = start + div + (1 if index < mod else 0) split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) ) if len(__magic_name__ ) != sum(len(i[1] ) for i in split_kwds ): raise ValueError( f"""Error dividing inputs iterable among processes. """ f"""Total number of objects {len(__magic_name__ )}, """ f"""length: {sum(len(i[1] ) for i in split_kwds )}""" ) logger.info( f"""Spawning {num_proc} processes for {len(__magic_name__ )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" ) UpperCamelCase , UpperCamelCase :List[str] = None, None if not disable_tqdm: UpperCamelCase , UpperCamelCase :Dict = (RLock(),), tqdm.set_lock with Pool(__magic_name__ , initargs=__magic_name__ , initializer=__magic_name__ ) as pool: UpperCamelCase :Dict = pool.map(__magic_name__ , __magic_name__ ) logger.info(f"""Finished {num_proc} processes""" ) UpperCamelCase :Optional[Any] = [obj for proc_res in mapped for obj in proc_res] logger.info(f"""Unpacked {len(__magic_name__ )} objects""" ) return mapped def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Dict , __magic_name__ : int , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : int , __magic_name__ : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" import joblib with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=__magic_name__ ): return joblib.Parallel()( joblib.delayed(__magic_name__ )((function, obj, types, None, True, None) ) for obj in iterable ) @experimental @contextlib.contextmanager def SCREAMING_SNAKE_CASE_ ( __magic_name__ : str ) -> List[Any]: """simple docstring""" UpperCamelCase :Union[str, Any] = backend_name if backend_name == "spark": from joblibspark import register_spark register_spark() # TODO: call create_cache_and_write_probe if "download" in steps # TODO: raise NotImplementedError when Dataset.map etc is called try: yield finally: UpperCamelCase :Union[str, Any] = None
38
'''simple docstring''' import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(SCREAMING_SNAKE_CASE ): A : int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[str] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> int: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: with self.subTest(SCREAMING_SNAKE_CASE ): A : Any = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Any = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: A : Optional[int] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = FlaxBertModel.from_pretrained(SCREAMING_SNAKE_CASE ) A : Optional[Any] = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() @slow def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: A : List[str] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE ) A : int = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''bert-base is not a local folder and is not a valid model identifier''' ): A : List[Any] = FlaxAutoModel.from_pretrained('''bert-base''' ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): A : Optional[int] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE , revision='''aaaaaa''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ): A : List[str] = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" with self.assertRaisesRegex(SCREAMING_SNAKE_CASE , '''Use `from_pt=True` to load this model''' ): A : Any = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
3
0
def __A ( __lowerCAmelCase )-> bool: """simple docstring""" _UpperCAmelCase = [int(__lowerCAmelCase ) for i in ip_va_address.split('.' ) if i.isdigit()] return len(__lowerCAmelCase ) == 4 and all(0 <= int(__lowerCAmelCase ) <= 254 for octet in octets ) if __name__ == "__main__": _a = input().strip() _a = '''valid''' if is_ip_va_address_valid(ip) else '''invalid''' print(F'''{ip} is a {valid_or_invalid} IP v4 address.''')
39
'''simple docstring''' import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowercase : Union[str, Any] = { 'text_branch': 'text_model', 'audio_branch': 'audio_model.audio_encoder', 'attn': 'attention.self', 'self.proj': 'output.dense', 'attention.self_mask': 'attn_mask', 'mlp.fc1': 'intermediate.dense', 'mlp.fc2': 'output.dense', 'norm1': 'layernorm_before', 'norm2': 'layernorm_after', 'bn0': 'batch_norm', } lowercase : Tuple = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc') def lowerCAmelCase_ ( snake_case__ , snake_case__=False ): '''simple docstring''' A, A : Tuple = create_model( '''HTSAT-tiny''' , '''roberta''' , snake_case__ , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=snake_case__ , fusion_type='''aff_2d''' if enable_fusion else None , ) return model, model_cfg def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Dict = {} A : str = R'''.*sequential.(\d+).*''' A : Union[str, Any] = R'''.*_projection.(\d+).*''' for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: A : Any = key.replace(snake_case__ , snake_case__ ) if re.match(snake_case__ , snake_case__ ): # replace sequential layers with list A : Any = re.match(snake_case__ , snake_case__ ).group(1 ) A : List[str] = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(snake_case__ )//3}.linear.' ) elif re.match(snake_case__ , snake_case__ ): A : Union[str, Any] = int(re.match(snake_case__ , snake_case__ ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... A : str = 1 if projecton_layer == 0 else 2 A : Optional[Any] = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' ) if "audio" and "qkv" in key: # split qkv into query key and value A : int = value A : List[Any] = mixed_qkv.size(0 ) // 3 A : Union[str, Any] = mixed_qkv[:qkv_dim] A : Optional[int] = mixed_qkv[qkv_dim : qkv_dim * 2] A : Optional[int] = mixed_qkv[qkv_dim * 2 :] A : Tuple = query_layer A : Union[str, Any] = key_layer A : Optional[int] = value_layer else: A : Dict = value return model_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' A, A : int = init_clap(snake_case__ , enable_fusion=snake_case__ ) clap_model.eval() A : str = clap_model.state_dict() A : Union[str, Any] = rename_state_dict(snake_case__ ) A : Tuple = ClapConfig() A : str = enable_fusion A : str = ClapModel(snake_case__ ) # ignore the spectrogram embedding layer model.load_state_dict(snake_case__ , strict=snake_case__ ) model.save_pretrained(snake_case__ ) transformers_config.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase : List[str] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not') lowercase : Tuple = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
3
0
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() __lowercase = logging.get_logger(__name__) def lowercase ( A_ )-> YolosConfig: '''simple docstring''' a : Any = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: a : List[str] = 192 a : int = 768 a : Dict = 12 a : Any = 3 a : List[Any] = [800, 1_333] a : List[str] = False elif yolos_name == "yolos_s_dWr": a : str = 330 a : Optional[int] = 14 a : Dict = 6 a : Union[str, Any] = 1_320 elif "yolos_s" in yolos_name: a : Any = 384 a : List[Any] = 1_536 a : Any = 12 a : List[Any] = 6 elif "yolos_b" in yolos_name: a : Dict = [800, 1_344] a : Tuple = 91 a : Union[str, Any] = "huggingface/label-files" a : str = "coco-detection-id2label.json" a : Union[str, Any] = json.load(open(hf_hub_download(A_ , A_ , repo_type="dataset" ) , "r" ) ) a : int = {int(A_ ): v for k, v in idalabel.items()} a : List[Any] = idalabel a : Optional[Any] = {v: k for k, v in idalabel.items()} return config def lowercase ( A_ , A_ , A_ = False )-> Tuple: '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) a : Optional[int] = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' ) a : Any = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict a : Optional[Any] = in_proj_weight[: config.hidden_size, :] a : Any = in_proj_bias[: config.hidden_size] a : str = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] a : Optional[int] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] a : Any = in_proj_weight[-config.hidden_size :, :] a : str = in_proj_bias[-config.hidden_size :] def lowercase ( A_ )-> str: '''simple docstring''' if "backbone" in name: a : Any = name.replace("backbone" , "vit" ) if "cls_token" in name: a : str = name.replace("cls_token" , "embeddings.cls_token" ) if "det_token" in name: a : Union[str, Any] = name.replace("det_token" , "embeddings.detection_tokens" ) if "mid_pos_embed" in name: a : Tuple = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" ) if "pos_embed" in name: a : Optional[Any] = name.replace("pos_embed" , "embeddings.position_embeddings" ) if "patch_embed.proj" in name: a : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "blocks" in name: a : Optional[int] = name.replace("blocks" , "encoder.layer" ) if "attn.proj" in name: a : List[str] = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: a : Optional[int] = name.replace("attn" , "attention.self" ) if "norm1" in name: a : List[Any] = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: a : List[Any] = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: a : int = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: a : str = name.replace("mlp.fc2" , "output.dense" ) if "class_embed" in name: a : Union[str, Any] = name.replace("class_embed" , "class_labels_classifier" ) if "bbox_embed" in name: a : int = name.replace("bbox_embed" , "bbox_predictor" ) if "vit.norm" in name: a : Union[str, Any] = name.replace("vit.norm" , "vit.layernorm" ) return name def lowercase ( A_ , A_ )-> dict: '''simple docstring''' for key in orig_state_dict.copy().keys(): a : Optional[int] = orig_state_dict.pop(A_ ) if "qkv" in key: a : Optional[Any] = key.split("." ) a : List[str] = int(key_split[2] ) a : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: a : Union[str, Any] = val[:dim, :] a : Union[str, Any] = val[ dim : dim * 2, : ] a : List[str] = val[-dim:, :] else: a : Optional[Any] = val[:dim] a : List[Any] = val[dim : dim * 2] a : int = val[-dim:] else: a : int = val return orig_state_dict def lowercase ( )-> torch.Tensor: '''simple docstring''' a : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg" a : str = Image.open(requests.get(A_ , stream=A_ ).raw ) return im @torch.no_grad() def lowercase ( A_ , A_ , A_ , A_ = False )-> Any: '''simple docstring''' a : str = get_yolos_config(A_ ) # load original state_dict a : List[str] = torch.load(A_ , map_location="cpu" )["model"] # load 🤗 model a : List[str] = YolosForObjectDetection(A_ ) model.eval() a : Tuple = convert_state_dict(A_ , A_ ) model.load_state_dict(A_ ) # Check outputs on an image, prepared by YolosImageProcessor a : Dict = 800 if yolos_name != "yolos_ti" else 512 a : List[str] = YolosImageProcessor(format="coco_detection" , size=A_ ) a : Optional[Any] = image_processor(images=prepare_img() , return_tensors="pt" ) a : str = model(**A_ ) a , a : Tuple = outputs.logits, outputs.pred_boxes a , a : Dict = None, None if yolos_name == "yolos_ti": a : str = torch.tensor( [[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] ) a : int = torch.tensor( [[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] ) elif yolos_name == "yolos_s_200_pre": a : Any = torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] ) a : str = torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] ) elif yolos_name == "yolos_s_300_pre": a : Optional[Any] = torch.tensor( [[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] ) a : Optional[int] = torch.tensor( [[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] ) elif yolos_name == "yolos_s_dWr": a : List[str] = torch.tensor( [[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] ) a : List[Any] = torch.tensor( [[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] ) elif yolos_name == "yolos_base": a : Any = torch.tensor( [[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] ) a : Union[str, Any] = torch.tensor( [[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] ) else: raise ValueError(F'''Unknown yolos_name: {yolos_name}''' ) assert torch.allclose(logits[0, :3, :3] , A_ , atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , A_ , atol=1e-4 ) Path(A_ ).mkdir(exist_ok=A_ ) print(F'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(A_ ) if push_to_hub: a : Tuple = { "yolos_ti": "yolos-tiny", "yolos_s_200_pre": "yolos-small", "yolos_s_300_pre": "yolos-small-300", "yolos_s_dWr": "yolos-small-dwr", "yolos_base": "yolos-base", } print("Pushing to the hub..." ) a : Any = model_mapping[yolos_name] image_processor.push_to_hub(A_ , organization="hustvl" ) model.push_to_hub(A_ , organization="hustvl" ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--yolos_name""", default="""yolos_s_200_pre""", type=str, help=( """Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',""" """ 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'.""" ), ) parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) __lowercase = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
40
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging lowercase : Dict = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise if not is_sharded: A : Union[str, Any] = os.path.abspath(snake_case__ ) logger.info(F'Loading PyTorch weights from {pt_path}' ) A : Any = torch.load(snake_case__ , map_location='''cpu''' ) logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' ) A : List[str] = convert_pytorch_state_dict_to_flax(snake_case__ , snake_case__ ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files A : Any = convert_pytorch_sharded_state_dict_to_flax(snake_case__ , snake_case__ ) return flax_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' def is_key_or_prefix_key_in_dict(snake_case__ ) -> bool: return len(set(snake_case__ ) & {key, (model_prefix,) + key} ) > 0 # layer norm A : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean A : Tuple = pt_tuple_key[:-1] + ('''mean''',) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var A : Dict = pt_tuple_key[:-1] + ('''var''',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # embedding A : Any = pt_tuple_key[:-1] + ('''embedding''',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # conv layer A : Optional[Any] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(snake_case__ ): A : List[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer A : Optional[int] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(snake_case__ ): A : str = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight A : Dict = pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias A : List[Any] = pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 A : Dict = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): A : List[Any] = pt_tuple_key[-2] + '''_g''' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): A : List[str] = pt_tuple_key[-2] + '''_v''' if name is not None: A : int = pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = {k: v.numpy() for k, v in pt_state_dict.items()} A : int = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: A : List[str] = flax_model.params['''params'''] else: A : Dict = flax_model.params A : List[Any] = flatten_dict(snake_case__ ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : List[str] = flatten_dict(flax_model.params['''batch_stats'''] ) random_flax_state_dict.update(snake_case__ ) A : int = {} A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : int = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : str = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : Union[str, Any] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Any = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Dict = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : Any = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: A : Tuple = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : List[str] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch # Load the index A : Union[str, Any] = {} for shard_file in shard_filenames: # load using msgpack utils A : List[str] = torch.load(snake_case__ ) A : int = {k: v.numpy() for k, v in pt_state_dict.items()} A : Tuple = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : Optional[int] = flax_model.params['''params'''] A : List[Any] = flatten_dict(snake_case__ ) random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) ) else: A : Dict = flax_model.params A : Tuple = flatten_dict(snake_case__ ) A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : List[str] = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : int = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : List[str] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Union[str, Any] = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Any = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : int = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue if "var" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = os.path.abspath(snake_case__ ) logger.info(F'Loading Flax weights from {flax_checkpoint_path}' ) # import correct flax class A : List[str] = getattr(snake_case__ , '''Flax''' + model.__class__.__name__ ) # load flax weight dict with open(snake_case__ , '''rb''' ) as state_f: try: A : int = from_bytes(snake_case__ , state_f.read() ) except UnpicklingError: raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights A : List[str] = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values() if any(snake_case__ ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) A : Optional[Any] = jax.tree_util.tree_map( lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ ) A : Union[str, Any] = flatten_dict(snake_case__ ) A : List[Any] = pt_model.state_dict() A : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) A : Tuple = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys A : int = [] A : Any = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): A : Union[str, Any] = flax_key_tuple[0] == pt_model.base_model_prefix A : int = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: A : List[str] = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: A : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case__ ) not in pt_model_dict: # conv layer A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) A : Optional[int] = jnp.transpose(snake_case__ , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ) not in pt_model_dict: # linear layer A : Tuple = flax_key_tuple[:-1] + ('''weight''',) A : Tuple = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_mean''',) elif "var" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_var''',) if "batch_stats" in flax_state: A : List[Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: A : Union[str, Any] = '''.'''.join(snake_case__ ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. A : int = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: A : Optional[int] = key.split('''.''' ) A : Dict = None if key_components[-3::2] == ["parametrizations", "original0"]: A : List[str] = key_components[-2] + '''_g''' elif key_components[-3::2] == ["parametrizations", "original1"]: A : List[Any] = key_components[-2] + '''_v''' if name is not None: A : str = key_components[:-3] + [name] A : Optional[Any] = '''.'''.join(snake_case__ ) A : Optional[Any] = key if flax_key in special_pt_names: A : Optional[Any] = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ' F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) else: # add weight to pytorch dict A : Dict = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor A : Dict = torch.from_numpy(snake_case__ ) # remove from missing keys missing_keys.remove(snake_case__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(snake_case__ ) pt_model.load_state_dict(snake_case__ ) # re-transform missing_keys to list A : List[Any] = list(snake_case__ ) if len(snake_case__ ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) else: logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' ) if len(snake_case__ ) > 0: logger.warning( F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' ''' use it for predictions and inference.''' ) else: logger.warning( F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n' '''If your task is similar to the task the model of the checkpoint was trained on, ''' F'you can already use {pt_model.__class__.__name__} for predictions without further training.' ) return pt_model
3
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _A : Dict =logging.get_logger(__name__) _A : List[Any] ={ '''google/vivit-b-16x2-kinetics400''': ( '''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json''' ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class _lowercase ( _lowercase ): a = """vivit""" def __init__( self: List[str] , UpperCamelCase__: List[Any]=224 , UpperCamelCase__: Tuple=32 , UpperCamelCase__: int=[2, 16, 16] , UpperCamelCase__: Optional[Any]=3 , UpperCamelCase__: Dict=768 , UpperCamelCase__: Optional[int]=12 , UpperCamelCase__: Tuple=12 , UpperCamelCase__: List[str]=3_072 , UpperCamelCase__: Optional[int]="gelu_fast" , UpperCamelCase__: Union[str, Any]=0.0 , UpperCamelCase__: Any=0.0 , UpperCamelCase__: Optional[Any]=0.02 , UpperCamelCase__: Optional[Any]=1e-06 , UpperCamelCase__: List[str]=True , **UpperCamelCase__: List[Any] , ): lowerCamelCase__ : List[Any] = hidden_size lowerCamelCase__ : Dict = num_hidden_layers lowerCamelCase__ : List[str] = num_attention_heads lowerCamelCase__ : Any = intermediate_size lowerCamelCase__ : Union[str, Any] = hidden_act lowerCamelCase__ : Dict = hidden_dropout_prob lowerCamelCase__ : Tuple = attention_probs_dropout_prob lowerCamelCase__ : Optional[int] = initializer_range lowerCamelCase__ : Optional[int] = layer_norm_eps lowerCamelCase__ : int = image_size lowerCamelCase__ : str = num_frames lowerCamelCase__ : Optional[Any] = tubelet_size lowerCamelCase__ : int = num_channels lowerCamelCase__ : Any = qkv_bias super().__init__(**UpperCamelCase__ )
41
'''simple docstring''' import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint lowercase : Optional[int] = { '169M': 12, '430M': 24, '1B5': 24, '3B': 32, '7B': 32, '14B': 40, } lowercase : Optional[Any] = { '169M': 7_68, '430M': 10_24, '1B5': 20_48, '3B': 25_60, '7B': 40_96, '14B': 51_20, } def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Optional[Any] = list(state_dict.keys() ) for name in state_dict_keys: A : str = state_dict.pop(snake_case__ ) # emb -> embedding if name.startswith('''emb.''' ): A : Optional[Any] = name.replace('''emb.''' , '''embeddings.''' ) # ln_0 -> pre_ln (only present at block 0) if name.startswith('''blocks.0.ln0''' ): A : Union[str, Any] = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' ) # att -> attention A : int = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , snake_case__ ) # ffn -> feed_forward A : List[Any] = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , snake_case__ ) # time_mix_k -> time_mix_key and reshape if name.endswith('''.time_mix_k''' ): A : List[str] = name.replace('''.time_mix_k''' , '''.time_mix_key''' ) # time_mix_v -> time_mix_value and reshape if name.endswith('''.time_mix_v''' ): A : Union[str, Any] = name.replace('''.time_mix_v''' , '''.time_mix_value''' ) # time_mix_r -> time_mix_key and reshape if name.endswith('''.time_mix_r''' ): A : Union[str, Any] = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' ) if name != "head.weight": A : List[Any] = '''rwkv.''' + name A : Dict = weight return state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=False , snake_case__=None ): '''simple docstring''' if tokenizer_file is None: print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' ) A : int = 5_0277 A : Optional[int] = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' ) else: A : str = PreTrainedTokenizerFast(tokenizer_file=snake_case__ ) A : Any = len(snake_case__ ) tokenizer.save_pretrained(snake_case__ ) # 2. Build the config A : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: A : List[str] = candidate break if size is None: raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' ) if size not in possible_sizes: raise ValueError(F'`size` should be one of {possible_sizes}, got {size}.' ) A : Any = RwkvConfig( vocab_size=snake_case__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(snake_case__ ) # 3. Download model file then convert state_dict A : Union[str, Any] = hf_hub_download(snake_case__ , snake_case__ ) A : Tuple = torch.load(snake_case__ , map_location='''cpu''' ) A : List[Any] = convert_state_dict(snake_case__ ) # 4. Split in shards and save A, A : List[str] = shard_checkpoint(snake_case__ ) for shard_file, shard in shards.items(): torch.save(snake_case__ , os.path.join(snake_case__ , snake_case__ ) ) if index is not None: A : Dict = os.path.join(snake_case__ , snake_case__ ) # Save the index as well with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: A : List[Any] = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '''\n''' f.write(snake_case__ ) # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( '''Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.''' ) A : List[Any] = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: A : Union[str, Any] = torch.load(os.path.join(snake_case__ , snake_case__ ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(snake_case__ , snake_case__ ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' ) A : int = AutoModelForCausalLM.from_pretrained(snake_case__ ) model.push_to_hub(snake_case__ , max_shard_size='''2GB''' ) tokenizer.push_to_hub(snake_case__ ) if __name__ == "__main__": lowercase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.' ) parser.add_argument( '--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.' ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='Where to save the converted model.' ) parser.add_argument( '--tokenizer_file', default=None, type=str, help='Path to the tokenizer file to use (if not provided, only the model is converted).', ) parser.add_argument( '--size', default=None, type=str, help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Push to the Hub the converted model.', ) parser.add_argument( '--model_name', default=None, type=str, help='Name of the pushed model on the Hub, including the username / organization.', ) lowercase : Union[str, Any] = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
3
0
'''simple docstring''' from collections import defaultdict from math import gcd def SCREAMING_SNAKE_CASE__ ( __A = 1_500_000 ) -> int: _snake_case = defaultdict(__A ) _snake_case = 2 while 2 * euclid_m * (euclid_m + 1) <= limit: for euclid_n in range((euclid_m % 2) + 1 , __A , 2 ): if gcd(__A , __A ) > 1: continue _snake_case = 2 * euclid_m * (euclid_m + euclid_n) for perimeter in range(__A , limit + 1 , __A ): frequencies[perimeter] += 1 euclid_m += 1 return sum(1 for frequency in frequencies.values() if frequency == 1 ) if __name__ == "__main__": print(F'''{solution() = }''')
42
'''simple docstring''' from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING lowercase : str = logging.get_logger(__name__) @add_end_docstrings(__snake_case ) class A ( __snake_case ): def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) self.check_model_type(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A, A : Dict = {}, {} if padding is not None: A : List[str] = padding if truncation is not None: A : Dict = truncation if top_k is not None: A : Optional[Any] = top_k return preprocess_params, {}, postprocess_params def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE , (Image.Image, str) ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : int = {'''image''': image, '''question''': question} else: A : Any = image A : Any = super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) return results def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" A : Union[str, Any] = load_image(inputs['''image'''] ) A : Optional[Any] = self.tokenizer( inputs['''question'''] , return_tensors=self.framework , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE ) A : Dict = self.image_processor(images=SCREAMING_SNAKE_CASE , return_tensors=self.framework ) model_inputs.update(SCREAMING_SNAKE_CASE ) return model_inputs def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : List[Any] = self.model(**SCREAMING_SNAKE_CASE ) return model_outputs def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=5 ) -> int: """simple docstring""" if top_k > self.model.config.num_labels: A : Dict = self.model.config.num_labels if self.framework == "pt": A : Optional[int] = model_outputs.logits.sigmoid()[0] A, A : int = probs.topk(SCREAMING_SNAKE_CASE ) else: raise ValueError(F'Unsupported framework: {self.framework}' ) A : int = scores.tolist() A : List[str] = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )]
3
0
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __lowercase = logging.get_logger(__name__) __lowercase = {'''vocab_file''': '''spiece.model'''} __lowercase = { '''vocab_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''', } } __lowercase = { '''albert-base-v1''': 512, '''albert-large-v1''': 512, '''albert-xlarge-v1''': 512, '''albert-xxlarge-v1''': 512, '''albert-base-v2''': 512, '''albert-large-v2''': 512, '''albert-xlarge-v2''': 512, '''albert-xxlarge-v2''': 512, } __lowercase = '''▁''' class lowerCamelCase_ ( UpperCAmelCase_ ): '''simple docstring''' a__ : Tuple = VOCAB_FILES_NAMES a__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP a__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , __lowercase , __lowercase=True , __lowercase=True , __lowercase=False , __lowercase="[CLS]" , __lowercase="[SEP]" , __lowercase="<unk>" , __lowercase="[SEP]" , __lowercase="<pad>" , __lowercase="[CLS]" , __lowercase="[MASK]" , __lowercase = None , **__lowercase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. __UpperCamelCase :Union[str, Any] = ( AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase , normalized=__lowercase) if isinstance(__lowercase , __lowercase) else mask_token ) __UpperCamelCase :List[str] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , sp_model_kwargs=self.sp_model_kwargs , **__lowercase , ) __UpperCamelCase :str = do_lower_case __UpperCamelCase :str = remove_space __UpperCamelCase :Union[str, Any] = keep_accents __UpperCamelCase :Dict = vocab_file __UpperCamelCase :str = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(__lowercase) @property def UpperCamelCase__ ( self) -> Any: return len(self.sp_model) def UpperCamelCase__ ( self) -> List[Any]: __UpperCamelCase :Optional[int] = {self.convert_ids_to_tokens(__lowercase): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self) -> Union[str, Any]: __UpperCamelCase :List[str] = self.__dict__.copy() __UpperCamelCase :Union[str, Any] = None return state def __setstate__( self , __lowercase) -> Any: __UpperCamelCase :int = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs'''): __UpperCamelCase :Dict = {} __UpperCamelCase :Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def UpperCamelCase__ ( self , __lowercase) -> List[str]: if self.remove_space: __UpperCamelCase :List[Any] = ''' '''.join(inputs.strip().split()) else: __UpperCamelCase :List[Any] = inputs __UpperCamelCase :str = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''') if not self.keep_accents: __UpperCamelCase :Optional[Any] = unicodedata.normalize('''NFKD''' , __lowercase) __UpperCamelCase :List[str] = ''''''.join([c for c in outputs if not unicodedata.combining(__lowercase)]) if self.do_lower_case: __UpperCamelCase :List[Any] = outputs.lower() return outputs def UpperCamelCase__ ( self , __lowercase) -> List[str]: __UpperCamelCase :Optional[Any] = self.preprocess_text(__lowercase) __UpperCamelCase :List[str] = self.sp_model.encode(__lowercase , out_type=__lowercase) __UpperCamelCase :Any = [] for piece in pieces: if len(__lowercase) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit(): __UpperCamelCase :Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowercase , '''''')) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0]) == 1: __UpperCamelCase :int = cur_pieces[1:] else: __UpperCamelCase :List[Any] = cur_pieces[0][1:] cur_pieces.append(piece[-1]) new_pieces.extend(__lowercase) else: new_pieces.append(__lowercase) return new_pieces def UpperCamelCase__ ( self , __lowercase) -> str: return self.sp_model.PieceToId(__lowercase) def UpperCamelCase__ ( self , __lowercase) -> int: return self.sp_model.IdToPiece(__lowercase) def UpperCamelCase__ ( self , __lowercase) -> List[Any]: __UpperCamelCase :int = [] __UpperCamelCase :List[Any] = '''''' __UpperCamelCase :int = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__lowercase) + token __UpperCamelCase :List[str] = True __UpperCamelCase :Optional[Any] = [] else: current_sub_tokens.append(__lowercase) __UpperCamelCase :Tuple = False out_string += self.sp_model.decode(__lowercase) return out_string.strip() def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> List[int]: __UpperCamelCase :str = [self.sep_token_id] __UpperCamelCase :Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCamelCase__ ( self , __lowercase , __lowercase = None , __lowercase = False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase) if token_ids_a is not None: return [1] + ([0] * len(__lowercase)) + [1] + ([0] * len(__lowercase)) + [1] return [1] + ([0] * len(__lowercase)) + [1] def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> List[int]: __UpperCamelCase :List[Any] = [self.sep_token_id] __UpperCamelCase :Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> Tuple[str]: if not os.path.isdir(__lowercase): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""") return __UpperCamelCase :List[Any] = os.path.join( __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']) if os.path.abspath(self.vocab_file) != os.path.abspath(__lowercase) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , __lowercase) elif not os.path.isfile(self.vocab_file): with open(__lowercase , '''wb''') as fi: __UpperCamelCase :Optional[int] = self.sp_model.serialized_model_proto() fi.write(__lowercase) return (out_vocab_file,)
43
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase : Union[str, Any] = logging.get_logger(__name__) lowercase : str = { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json', 'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json', 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json', 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json', 'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json', 'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json', 'cl-tohoku/bert-base-japanese-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json' ), 'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json', # See all BERT models at https://huggingface.co/models?filter=bert } class A ( __snake_case ): __magic_name__ = '''bert''' def __init__( self , SCREAMING_SNAKE_CASE=30522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Optional[int]: """simple docstring""" super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = vocab_size A : Optional[Any] = hidden_size A : List[Any] = num_hidden_layers A : List[str] = num_attention_heads A : Dict = hidden_act A : Optional[Any] = intermediate_size A : List[Any] = hidden_dropout_prob A : List[Any] = attention_probs_dropout_prob A : Optional[Any] = max_position_embeddings A : List[str] = type_vocab_size A : Dict = initializer_range A : str = layer_norm_eps A : int = position_embedding_type A : Dict = use_cache A : str = classifier_dropout class A ( __snake_case ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": A : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: A : Optional[int] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
3
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): _UpperCamelCase : List[Any] = KandinskyImgaImgPipeline _UpperCamelCase : Optional[Any] = ["prompt", "image_embeds", "negative_image_embeds", "image"] _UpperCamelCase : List[Any] = [ "prompt", "negative_prompt", "image_embeds", "negative_image_embeds", "image", ] _UpperCamelCase : Dict = [ "generator", "height", "width", "strength", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] _UpperCamelCase : Union[str, Any] = False @property def __A ( self ): return 32 @property def __A ( self ): return 32 @property def __A ( self ): return self.time_input_dim @property def __A ( self ): return self.time_input_dim * 4 @property def __A ( self ): return 100 @property def __A ( self ): _lowerCAmelCase : Optional[Any] = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" ) return tokenizer @property def __A ( self ): torch.manual_seed(0 ) _lowerCAmelCase : List[Any] = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) _lowerCAmelCase : int = MultilingualCLIP(a__ ) _lowerCAmelCase : Union[str, Any] = text_encoder.eval() return text_encoder @property def __A ( self ): torch.manual_seed(0 ) _lowerCAmelCase : str = { """in_channels""": 4, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """text_image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """text_image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } _lowerCAmelCase : Optional[Any] = UNetaDConditionModel(**a__ ) return model @property def __A ( self ): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __A ( self ): torch.manual_seed(0 ) _lowerCAmelCase : str = VQModel(**self.dummy_movq_kwargs ) return model def __A ( self ): _lowerCAmelCase : Union[str, Any] = self.dummy_text_encoder _lowerCAmelCase : List[Any] = self.dummy_tokenizer _lowerCAmelCase : int = self.dummy_unet _lowerCAmelCase : Dict = self.dummy_movq _lowerCAmelCase : Tuple = { """num_train_timesteps""": 1000, """beta_schedule""": """linear""", """beta_start""": 0.0_0_0_8_5, """beta_end""": 0.0_1_2, """clip_sample""": False, """set_alpha_to_one""": False, """steps_offset""": 0, """prediction_type""": """epsilon""", """thresholding""": False, } _lowerCAmelCase : Optional[Any] = DDIMScheduler(**a__ ) _lowerCAmelCase : List[Any] = { """text_encoder""": text_encoder, """tokenizer""": tokenizer, """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def __A ( self , a__ , a__=0 ): _lowerCAmelCase : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(a__ ) ).to(a__ ) _lowerCAmelCase : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a__ ) # create init_image _lowerCAmelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(a__ ) ).to(a__ ) _lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase : List[Any] = Image.fromarray(np.uinta(a__ ) ).convert("""RGB""" ).resize((256, 256) ) if str(a__ ).startswith("""mps""" ): _lowerCAmelCase : List[Any] = torch.manual_seed(a__ ) else: _lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ ) _lowerCAmelCase : Optional[Any] = { """prompt""": """horse""", """image""": init_image, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 10, """guidance_scale""": 7.0, """strength""": 0.2, """output_type""": """np""", } return inputs def __A ( self ): _lowerCAmelCase : Any = """cpu""" _lowerCAmelCase : int = self.get_dummy_components() _lowerCAmelCase : int = self.pipeline_class(**a__ ) _lowerCAmelCase : Optional[int] = pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) _lowerCAmelCase : Tuple = pipe(**self.get_dummy_inputs(a__ ) ) _lowerCAmelCase : List[Any] = output.images _lowerCAmelCase : Tuple = pipe( **self.get_dummy_inputs(a__ ) , return_dict=a__ , )[0] _lowerCAmelCase : Dict = image[0, -3:, -3:, -1] _lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _lowerCAmelCase : str = np.array( [0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class __A ( unittest.TestCase ): def __A ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self ): _lowerCAmelCase : Dict = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/kandinsky_img2img_frog.npy""" ) _lowerCAmelCase : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) _lowerCAmelCase : Union[str, Any] = """A red cartoon frog, 4k""" _lowerCAmelCase : int = KandinskyPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(a__ ) _lowerCAmelCase : Tuple = KandinskyImgaImgPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa ) _lowerCAmelCase : Any = pipeline.to(a__ ) pipeline.set_progress_bar_config(disable=a__ ) _lowerCAmelCase : Any = torch.Generator(device="""cpu""" ).manual_seed(0 ) _lowerCAmelCase , _lowerCAmelCase : Dict = pipe_prior( a__ , generator=a__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() _lowerCAmelCase : Union[str, Any] = pipeline( a__ , image=a__ , image_embeds=a__ , negative_image_embeds=a__ , generator=a__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , ) _lowerCAmelCase : Dict = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(a__ , a__ )
44
'''simple docstring''' import requests from bsa import BeautifulSoup def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : str = BeautifulSoup(requests.get(snake_case__ , params=snake_case__ ).content , '''html.parser''' ) A : Dict = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} ) A : Optional[int] = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' ) return anchors[2].get_text() if __name__ == "__main__": lowercase : str = { 'title': ( 'Precisely geometry controlled microsupercapacitors for ultrahigh areal ' 'capacitance, volumetric capacitance, and energy density' ), 'journal': 'Chem. Mater.', 'volume': 30, 'pages': '3979-3990', 'year': 20_18, 'hl': 'en', } print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
3
0
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' __UpperCAmelCase : str = BioGptTokenizer __UpperCAmelCase : List[Any] = False def __UpperCAmelCase ( self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __a = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] __a = dict(zip(_a , range(len(_a ) ) ) ) __a = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', ''''''] __a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' ) as fp: fp.write(json.dumps(_a ) ) with open(self.merges_file , '''w''' ) as fp: fp.write('''\n'''.join(_a ) ) def __UpperCAmelCase ( self , _a ): __a = '''lower newer''' __a = '''lower newer''' return input_text, output_text def __UpperCAmelCase ( self ): __a = BioGptTokenizer(self.vocab_file , self.merges_file ) __a = '''lower''' __a = ['''low''', '''er</w>'''] __a = tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) __a = tokens + ['''<unk>'''] __a = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a ) @slow def __UpperCAmelCase ( self ): __a = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) __a = tokenizer.encode('''sequence builders''' , add_special_tokens=_a ) __a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_a ) __a = tokenizer.build_inputs_with_special_tokens(_a ) __a = tokenizer.build_inputs_with_special_tokens(_a , _a ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
45
'''simple docstring''' class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Any = None A : Optional[Any] = None A : Tuple = graph self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Dict = len(SCREAMING_SNAKE_CASE ) A : Optional[Any] = None def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" if sources is int: A : Dict = [sources] if sinks is int: A : str = [sinks] if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0: return A : Optional[int] = sources[0] A : Union[str, Any] = sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1: A : Optional[int] = 0 for i in sources: max_input_flow += sum(self.graph[i] ) A : Dict = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: A : Dict = max_input_flow A : Tuple = 0 A : Tuple = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: A : Optional[Any] = max_input_flow A : Optional[Any] = size - 1 def __lowerCAmelCase ( self ) -> Any: """simple docstring""" if self.maximum_flow_algorithm is None: raise Exception('''You need to set maximum flow algorithm before.''' ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : List[Any] = algorithm(self ) class A : def __init__( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Union[str, Any] = flow_network A : Optional[Any] = flow_network.verticesCount A : Tuple = flow_network.sourceIndex A : Dict = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that A : str = flow_network.graph A : Optional[Any] = False def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" if not self.executed: self._algorithm() A : Optional[int] = True def __lowerCAmelCase ( self ) -> Any: """simple docstring""" pass class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE ) # use this to save your result A : List[str] = -1 def __lowerCAmelCase ( self ) -> str: """simple docstring""" if not self.executed: raise Exception('''You should execute algorithm before using its result!''' ) return self.maximum_flow class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE ) A : Optional[Any] = [[0] * self.verticies_count for i in range(self.verticies_count )] A : Union[str, Any] = [0] * self.verticies_count A : List[Any] = [0] * self.verticies_count def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Tuple = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule A : Optional[Any] = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list A : Union[str, Any] = 0 while i < len(SCREAMING_SNAKE_CASE ): A : str = vertices_list[i] A : List[str] = self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) ) A : int = 0 else: i += 1 A : Optional[Any] = sum(self.preflow[self.source_index] ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.relabel(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Dict = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : Dict = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): A : Dict = self.heights[to_index] if min_height is not None: A : Dict = min_height + 1 if __name__ == "__main__": lowercase : Optional[int] = [0] lowercase : List[Any] = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] lowercase : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network lowercase : List[str] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate lowercase : List[str] = flow_network.find_maximum_flow() print(f'''maximum flow is {maximum_flow}''')
3
0
"""simple docstring""" from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean SCREAMING_SNAKE_CASE__ = 0 SCREAMING_SNAKE_CASE__ = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] SCREAMING_SNAKE_CASE__ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right SCREAMING_SNAKE_CASE__ = tuple[int, int] class lowercase : def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> None: lowerCAmelCase = pos_x lowerCAmelCase = pos_y lowerCAmelCase = (pos_y, pos_x) lowerCAmelCase = goal_x lowerCAmelCase = goal_y lowerCAmelCase = g_cost lowerCAmelCase = parent lowerCAmelCase = self.calculate_heuristic() lowerCAmelCase = self.g_cost + self.h_cost def _snake_case ( self ) -> float: lowerCAmelCase = self.pos_x - self.goal_x lowerCAmelCase = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(lowercase ) + abs(lowercase ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self , lowercase ) -> bool: return self.f_cost < other.f_cost class lowercase : def __init__( self , lowercase , lowercase ) -> List[Any]: lowerCAmelCase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowercase ) lowerCAmelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , lowercase ) lowerCAmelCase = [self.start] lowerCAmelCase = [] lowerCAmelCase = False def _snake_case ( self ) -> list[TPosition]: while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() lowerCAmelCase = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(lowercase ) self.closed_nodes.append(lowercase ) lowerCAmelCase = self.get_successors(lowercase ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(lowercase ) else: # retrieve the best current path lowerCAmelCase = self.open_nodes.pop(self.open_nodes.index(lowercase ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(lowercase ) else: self.open_nodes.append(lowercase ) return [self.start.pos] def _snake_case ( self , lowercase ) -> list[Node]: lowerCAmelCase = [] for action in delta: lowerCAmelCase = parent.pos_x + action[1] lowerCAmelCase = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowercase ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( lowercase , lowercase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowercase , ) ) return successors def _snake_case ( self , lowercase ) -> list[TPosition]: lowerCAmelCase = node lowerCAmelCase = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) lowerCAmelCase = current_node.parent path.reverse() return path class lowercase : def __init__( self , lowercase , lowercase ) -> None: lowerCAmelCase = AStar(lowercase , lowercase ) lowerCAmelCase = AStar(lowercase , lowercase ) lowerCAmelCase = False def _snake_case ( self ) -> list[TPosition]: while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() lowerCAmelCase = self.fwd_astar.open_nodes.pop(0 ) lowerCAmelCase = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( lowercase , lowercase ) self.fwd_astar.closed_nodes.append(lowercase ) self.bwd_astar.closed_nodes.append(lowercase ) lowerCAmelCase = current_bwd_node lowerCAmelCase = current_fwd_node lowerCAmelCase = { self.fwd_astar: self.fwd_astar.get_successors(lowercase ), self.bwd_astar: self.bwd_astar.get_successors(lowercase ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(lowercase ) else: # retrieve the best current path lowerCAmelCase = astar.open_nodes.pop( astar.open_nodes.index(lowercase ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(lowercase ) else: astar.open_nodes.append(lowercase ) return [self.fwd_astar.start.pos] def _snake_case ( self , lowercase , lowercase ) -> list[TPosition]: lowerCAmelCase = self.fwd_astar.retrace_path(lowercase ) lowerCAmelCase = self.bwd_astar.retrace_path(lowercase ) bwd_path.pop() bwd_path.reverse() lowerCAmelCase = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] SCREAMING_SNAKE_CASE__ = (0, 0) SCREAMING_SNAKE_CASE__ = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) SCREAMING_SNAKE_CASE__ = time.time() SCREAMING_SNAKE_CASE__ = AStar(init, goal) SCREAMING_SNAKE_CASE__ = a_star.search() SCREAMING_SNAKE_CASE__ = time.time() - start_time print(f'AStar execution time = {end_time:f} seconds') SCREAMING_SNAKE_CASE__ = time.time() SCREAMING_SNAKE_CASE__ = BidirectionalAStar(init, goal) SCREAMING_SNAKE_CASE__ = time.time() - bd_start_time print(f'BidirectionalAStar execution time = {bd_end_time:f} seconds')
46
'''simple docstring''' def lowerCAmelCase_ ( snake_case__ = 10 ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ) or n < 0: raise ValueError('''Invalid input''' ) A : List[str] = 10**n A : Tuple = 2_8433 * (pow(2 , 783_0457 , snake_case__ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(f'''{solution(10) = }''')
3
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCamelCase : List[Any] = { "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"], "tokenization_tapas": ["TapasTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : List[Any] = [ "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST", "TapasForMaskedLM", "TapasForQuestionAnswering", "TapasForSequenceClassification", "TapasModel", "TapasPreTrainedModel", "load_tf_weights_in_tapas", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Union[str, Any] = [ "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST", "TFTapasForMaskedLM", "TFTapasForQuestionAnswering", "TFTapasForSequenceClassification", "TFTapasModel", "TFTapasPreTrainedModel", ] if TYPE_CHECKING: from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig from .tokenization_tapas import TapasTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tapas import ( TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasPreTrainedModel, load_tf_weights_in_tapas, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_tapas import ( TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, TFTapasPreTrainedModel, ) else: import sys lowerCamelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
47
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging lowercase : List[str] = logging.get_logger(__name__) lowercase : str = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class A ( __snake_case ): __magic_name__ = '''gpt_neo''' __magic_name__ = ['''past_key_values'''] __magic_name__ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , SCREAMING_SNAKE_CASE=50257 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=24 , SCREAMING_SNAKE_CASE=[[["global", "local"], 12]] , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=50256 , SCREAMING_SNAKE_CASE=50256 , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" A : Union[str, Any] = vocab_size A : Optional[Any] = max_position_embeddings A : Dict = hidden_size A : Optional[Any] = num_layers A : Tuple = num_heads A : int = intermediate_size A : Optional[Any] = window_size A : List[Any] = activation_function A : Union[str, Any] = resid_dropout A : Any = embed_dropout A : List[Any] = attention_dropout A : str = classifier_dropout A : List[Any] = layer_norm_epsilon A : str = initializer_range A : List[str] = use_cache A : Optional[int] = bos_token_id A : List[Any] = eos_token_id A : int = attention_types A : int = self.expand_attention_types_params(SCREAMING_SNAKE_CASE ) if len(self.attention_layers ) != self.num_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.attention_layers)` == `config.num_layers` ''' F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, ' F'`config.num_layers = {self.num_layers}`. ' '''`config.attention_layers` is prepared using `config.attention_types`. ''' '''Please verify the value of `config.attention_types` argument.''' ) super().__init__(bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : List[str] = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' import torch A : Tuple = input.size() A : Union[str, Any] = len(snake_case__ ) A : List[str] = shape[dimension] A : Union[str, Any] = torch.arange(0 , snake_case__ , snake_case__ ) A : List[str] = torch.div(sizedim - size , snake_case__ , rounding_mode='''floor''' ) + 1 A : Optional[int] = torch.arange(snake_case__ ) + low_indices[:min_length][:, None] A : str = [slice(snake_case__ )] * rank A : List[Any] = indices A : Union[str, Any] = input[s] A : List[str] = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch A : List[str] = torch.arange(1 , snake_case__ ) A : Optional[int] = torch.remainder(snake_case__ , snake_case__ ) A : Optional[int] = remainders == 0 A : Optional[Any] = candidates[divisor_indices] A : Optional[int] = torch.max(snake_case__ ) return largest_divisor, torch.div(snake_case__ , snake_case__ , rounding_mode='''floor''' ) class A ( __snake_case ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" A : Tuple = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE , direction='''inputs''' ) A : Optional[Any] = {0: '''batch''', 1: '''past_sequence + sequence'''} else: A : Dict = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return self._config.num_heads def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]: """simple docstring""" A : List[str] = super(SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs( SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE ) # We need to order the input in the way they appears in the forward() A : Any = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch A, A : Dict = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values A : str = seqlen + 2 A : List[Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) A : Any = [ (torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers ) ] A : str = common_inputs['''attention_mask'''] if self.use_past: A : Optional[int] = ordered_inputs['''attention_mask'''].dtype A : List[str] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )] , dim=1 ) return ordered_inputs @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return 13
3
0
import logging from transformers.configuration_utils import PretrainedConfig SCREAMING_SNAKE_CASE__ : List[str] = logging.getLogger(__name__) class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Optional[Any] = """masked_bert""" def __init__( self , UpperCamelCase__=3_0522 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-12 , UpperCamelCase__=0 , UpperCamelCase__="topK" , UpperCamelCase__="constant" , UpperCamelCase__=0.0 , **UpperCamelCase__ , ) -> List[Any]: super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ ) lowerCamelCase : int = vocab_size lowerCamelCase : Tuple = hidden_size lowerCamelCase : Dict = num_hidden_layers lowerCamelCase : Union[str, Any] = num_attention_heads lowerCamelCase : List[str] = hidden_act lowerCamelCase : List[str] = intermediate_size lowerCamelCase : Dict = hidden_dropout_prob lowerCamelCase : Tuple = attention_probs_dropout_prob lowerCamelCase : Tuple = max_position_embeddings lowerCamelCase : Any = type_vocab_size lowerCamelCase : str = initializer_range lowerCamelCase : Union[str, Any] = layer_norm_eps lowerCamelCase : int = pruning_method lowerCamelCase : Tuple = mask_init lowerCamelCase : List[Any] = mask_scale
48
'''simple docstring''' import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Union[str, Any] = [] A : Union[str, Any] = [] for i in range(self.num_layers ): A : Any = self.in_channels if i == 0 else self.out_channels A : Optional[Any] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Optional[int] = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnets A : Union[str, Any] = attentions if self.add_downsample: A : int = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Union[str, Any]: """simple docstring""" A : Optional[Any] = () for resnet, attn in zip(self.resnets , self.attentions ): A : int = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Dict = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[Any] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Optional[Any] = [] for i in range(self.num_layers ): A : Optional[Any] = self.in_channels if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets if self.add_downsample: A : Dict = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]: """simple docstring""" A : str = () for resnet in self.resnets: A : Optional[int] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[int] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[Any] = [] A : Optional[int] = [] for i in range(self.num_layers ): A : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : Dict = self.prev_output_channel if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : int = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets A : Optional[Any] = attentions if self.add_upsample: A : Optional[int] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[int]: """simple docstring""" for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states A : List[str] = res_hidden_states_tuple[-1] A : int = res_hidden_states_tuple[:-1] A : List[str] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Tuple = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : Dict = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : int = [] for i in range(self.num_layers ): A : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : List[str] = self.prev_output_channel if i == 0 else self.out_channels A : str = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[Any] = resnets if self.add_upsample: A : Optional[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Tuple: """simple docstring""" for resnet in self.resnets: # pop res hidden states A : Optional[int] = res_hidden_states_tuple[-1] A : Optional[Any] = res_hidden_states_tuple[:-1] A : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Optional[Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : List[str] = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : str = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] A : List[Any] = [] for _ in range(self.num_layers ): A : int = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[str] = resnets A : List[str] = attentions def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Dict: """simple docstring""" A : Optional[Any] = self.resnets[0](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): A : Optional[int] = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) return hidden_states
3
0
from __future__ import annotations __snake_case :str = [ [-1, 0], # left [0, -1], # down [1, 0], # right [0, 1], # up ] def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ): __a = [ [0 for col in range(len(grid[0] ) )] for row in range(len(_UpperCAmelCase ) ) ] # the reference grid __a = 1 __a = [ [0 for col in range(len(grid[0] ) )] for row in range(len(_UpperCAmelCase ) ) ] # the action grid __a = init[0] __a = init[1] __a = 0 __a = g + heuristic[x][y] # cost from starting cell to destination cell __a = [[f, g, x, y]] __a = False # flag that is set when search is complete __a = False # flag set if we can't find expand while not found and not resign: if len(_UpperCAmelCase ) == 0: raise ValueError('''Algorithm is unable to find solution''' ) else: # to choose the least costliest action so as to move closer to the goal cell.sort() cell.reverse() __a = cell.pop() __a = next_cell[2] __a = next_cell[3] __a = next_cell[1] if x == goal[0] and y == goal[1]: __a = True else: for i in range(len(_UpperCAmelCase ) ): # to try out different valid actions __a = x + DIRECTIONS[i][0] __a = y + DIRECTIONS[i][1] if xa >= 0 and xa < len(_UpperCAmelCase ) and ya >= 0 and ya < len(grid[0] ): if closed[xa][ya] == 0 and grid[xa][ya] == 0: __a = g + cost __a = ga + heuristic[xa][ya] cell.append([fa, ga, xa, ya] ) __a = 1 __a = i __a = [] __a = goal[0] __a = goal[1] invpath.append([x, y] ) # we get the reverse path from here while x != init[0] or y != init[1]: __a = x - DIRECTIONS[action[x][y]][0] __a = y - DIRECTIONS[action[x][y]][1] __a = xa __a = ya invpath.append([x, y] ) __a = [] for i in range(len(_UpperCAmelCase ) ): path.append(invpath[len(_UpperCAmelCase ) - 1 - i] ) return path, action if __name__ == "__main__": __snake_case :Dict = [ [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0], ] __snake_case :List[Any] = [0, 0] # all coordinates are given in format [y,x] __snake_case :Tuple = [len(grid) - 1, len(grid[0]) - 1] __snake_case :Any = 1 # the cost map which pushes the path closer to the goal __snake_case :Optional[int] = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] for i in range(len(grid)): for j in range(len(grid[0])): __snake_case :Union[str, Any] = abs(i - goal[0]) + abs(j - goal[1]) if grid[i][j] == 1: # added extra penalty in the heuristic map __snake_case :int = 99 __snake_case ,__snake_case :int = search(grid, init, goal, cost, heuristic) print('''ACTION MAP''') for i in range(len(action)): print(action[i]) for i in range(len(path)): print(path[i])
49
'''simple docstring''' import os def lowerCAmelCase_ ( ): '''simple docstring''' A : List[Any] = os.path.join(os.path.dirname(snake_case__ ) , '''num.txt''' ) with open(snake_case__ ) as file_hand: return str(sum(int(snake_case__ ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
3
0
from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> list[float]: lowerCamelCase__ , lowerCamelCase__ : List[str] = coefficient_matrix.shape lowerCamelCase__ , lowerCamelCase__ : Tuple = constant_matrix.shape if rowsa != colsa: lowerCamelCase__ : List[Any] = F"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}""" raise ValueError(_UpperCAmelCase ) if colsa != 1: lowerCamelCase__ : Tuple = F"""Constant matrix must be nx1 but received {rowsa}x{colsa}""" raise ValueError(_UpperCAmelCase ) if rowsa != rowsa: lowerCamelCase__ : Optional[Any] = ( 'Coefficient and constant matrices dimensions must be nxn and nx1 but ' F"""received {rowsa}x{colsa} and {rowsa}x{colsa}""" ) raise ValueError(_UpperCAmelCase ) if len(_UpperCAmelCase ) != rowsa: lowerCamelCase__ : Tuple = ( 'Number of initial values must be equal to number of rows in coefficient ' F"""matrix but received {len(_UpperCAmelCase )} and {rowsa}""" ) raise ValueError(_UpperCAmelCase ) if iterations <= 0: raise ValueError('Iterations must be at least 1' ) lowerCamelCase__ : NDArray[floataa] = np.concatenate( (coefficient_matrix, constant_matrix) , axis=1 ) lowerCamelCase__ , lowerCamelCase__ : List[str] = table.shape strictly_diagonally_dominant(_UpperCAmelCase ) # Iterates the whole matrix for given number of times for _ in range(_UpperCAmelCase ): lowerCamelCase__ : str = [] for row in range(_UpperCAmelCase ): lowerCamelCase__ : Dict = 0 for col in range(_UpperCAmelCase ): if col == row: lowerCamelCase__ : int = table[row][col] elif col == cols - 1: lowerCamelCase__ : Optional[int] = table[row][col] else: temp += (-1) * table[row][col] * init_val[col] lowerCamelCase__ : Optional[Any] = (temp + val) / denom new_val.append(_UpperCAmelCase ) lowerCamelCase__ : List[str] = new_val return [float(_UpperCAmelCase ) for i in new_val] def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> bool: lowerCamelCase__ , lowerCamelCase__ : List[Any] = table.shape lowerCamelCase__ : Optional[int] = True for i in range(0 , _UpperCAmelCase ): lowerCamelCase__ : Optional[Any] = 0 for j in range(0 , cols - 1 ): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError('Coefficient matrix is not strictly diagonally dominant' ) return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
50
'''simple docstring''' import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] ) @pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , '''IN_MEMORY_MAX_SIZE''' , snake_case__ ) A : Dict = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: A : Dict = dataset_size < in_memory_max_size else: A : Tuple = False A : int = is_small_dataset(snake_case__ ) assert result == expected
3
0
import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(a ) , '''Tatoeba directory does not exist.''' ) class __snake_case ( unittest.TestCase ): @cached_property def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = tempfile.mkdtemp() return TatoebaConverter(save_dir=_snake_case) @slow def lowerCamelCase ( self : str): """simple docstring""" self.resolver.convert_models(['''heb-eng''']) @slow def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=_snake_case) assert mmeta["long_pair"] == "heb-eng"
51
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class A ( __snake_case ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
3
0
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class A__ ( __snake_case ): def __init__( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : List[str] = params UpperCamelCase : Union[str, Any] = np.array(A_ ) UpperCamelCase : Optional[int] = np.array([len(A_ ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self , A_ ): '''simple docstring''' return (self.token_ids[index], self.lengths[index]) def __len__( self ): '''simple docstring''' return len(self.lengths ) def __UpperCamelCase( self ): '''simple docstring''' assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = self.params.max_model_input_size UpperCamelCase : Dict = self.lengths > max_len logger.info(F"""Splitting {sum(A_ )} too long sequences.""" ) def divide_chunks(A_ , A_ ): return [l[i : i + n] for i in range(0 , len(A_ ) , A_ )] UpperCamelCase : List[str] = [] UpperCamelCase : Tuple = [] if self.params.mlm: UpperCamelCase , UpperCamelCase : Dict = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"] else: UpperCamelCase , UpperCamelCase : Dict = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: UpperCamelCase : Dict = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: UpperCamelCase : Union[str, Any] = np.insert(A_ , 0 , A_ ) if sub_s[-1] != sep_id: UpperCamelCase : Optional[int] = np.insert(A_ , len(A_ ) , A_ ) assert len(A_ ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(A_ ) new_tok_ids.extend(A_ ) new_lengths.extend([len(A_ ) for l in sub_seqs] ) UpperCamelCase : Union[str, Any] = np.array(A_ ) UpperCamelCase : Union[str, Any] = np.array(A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = len(self ) UpperCamelCase : Dict = self.lengths > 11 UpperCamelCase : List[str] = self.token_ids[indices] UpperCamelCase : Tuple = self.lengths[indices] UpperCamelCase : Optional[Any] = len(self ) logger.info(F"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" ) def __UpperCamelCase( self ): '''simple docstring''' if "unk_token" not in self.params.special_tok_ids: return else: UpperCamelCase : List[str] = self.params.special_tok_ids["unk_token"] UpperCamelCase : int = len(self ) UpperCamelCase : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) UpperCamelCase : List[Any] = (unk_occs / self.lengths) < 0.5 UpperCamelCase : List[Any] = self.token_ids[indices] UpperCamelCase : str = self.lengths[indices] UpperCamelCase : Dict = len(self ) logger.info(F"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" ) def __UpperCamelCase( self ): '''simple docstring''' if not self.params.is_master: return logger.info(F"""{len(self )} sequences""" ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : str = [t[0] for t in batch] UpperCamelCase : str = [t[1] for t in batch] assert len(A_ ) == len(A_ ) # Max for paddings UpperCamelCase : Union[str, Any] = max(A_ ) # Pad token ids if self.params.mlm: UpperCamelCase : List[Any] = self.params.special_tok_ids["pad_token"] else: UpperCamelCase : Dict = self.params.special_tok_ids["unk_token"] UpperCamelCase : Optional[int] = [list(t.astype(A_ ) ) + [pad_idx] * (max_seq_len_ - len(A_ )) for t in token_ids] assert len(tk_ ) == len(A_ ) assert all(len(A_ ) == max_seq_len_ for t in tk_ ) UpperCamelCase : Any = torch.tensor(tk_ ) # (bs, max_seq_len_) UpperCamelCase : Any = torch.tensor(A_ ) # (bs) return tk_t, lg_t
52
'''simple docstring''' from scipy.stats import pearsonr import datasets lowercase : Optional[int] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n' lowercase : Optional[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n' lowercase : str = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: """simple docstring""" if return_pvalue: A : Union[str, Any] = pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )}
3
0
'''simple docstring''' from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any =["image_processor", "tokenizer"] SCREAMING_SNAKE_CASE_ : List[Any] ="BlipImageProcessor" SCREAMING_SNAKE_CASE_ : Optional[int] =("BertTokenizer", "BertTokenizerFast") def __init__( self : Dict , __A : Optional[int] , __A : List[Any] ): __UpperCamelCase = False super().__init__(__A , __A ) __UpperCamelCase = self.image_processor def __call__( self : List[Any] , __A : ImageInput = None , __A : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __A : bool = True , __A : Union[bool, str, PaddingStrategy] = False , __A : Union[bool, str, TruncationStrategy] = None , __A : Optional[int] = None , __A : int = 0 , __A : Optional[int] = None , __A : Optional[bool] = None , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = True , __A : Optional[Union[str, TensorType]] = None , **__A : List[Any] , ): if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None: __UpperCamelCase = self.tokenizer __UpperCamelCase = self.tokenizer( text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) return text_encoding # add pixel_values __UpperCamelCase = self.image_processor(__A , return_tensors=__A ) if text is not None: __UpperCamelCase = self.tokenizer( text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , ) else: __UpperCamelCase = None if text_encoding is not None: encoding_image_processor.update(__A ) return encoding_image_processor def _lowerCamelCase ( self : List[Any] , *__A : Dict , **__A : Optional[int] ): return self.tokenizer.batch_decode(*__A , **__A ) def _lowerCamelCase ( self : List[Any] , *__A : List[str] , **__A : Dict ): return self.tokenizer.decode(*__A , **__A ) @property def _lowerCamelCase ( self : Tuple ): __UpperCamelCase = self.tokenizer.model_input_names __UpperCamelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
53
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) lowercase : Dict = { 'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'], 'processing_speech_to_text': ['Speech2TextProcessor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[Any] = ['Speech2TextTokenizer'] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[str] = ['Speech2TextFeatureExtractor'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Dict = [ 'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFSpeech2TextForConditionalGeneration', 'TFSpeech2TextModel', 'TFSpeech2TextPreTrainedModel', ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Any = [ 'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Speech2TextForConditionalGeneration', 'Speech2TextModel', 'Speech2TextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys lowercase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
3
0
"""simple docstring""" # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers a__ : Dict = '''3''' print('''Python version:''', sys.version) print('''transformers version:''', transformers.__version__) try: import torch print('''Torch version:''', torch.__version__) print('''Cuda available:''', torch.cuda.is_available()) print('''Cuda version:''', torch.version.cuda) print('''CuDNN version:''', torch.backends.cudnn.version()) print('''Number of GPUs available:''', torch.cuda.device_count()) print('''NCCL version:''', torch.cuda.nccl.version()) except ImportError: print('''Torch version:''', None) try: import deepspeed print('''DeepSpeed version:''', deepspeed.__version__) except ImportError: print('''DeepSpeed version:''', None) try: import tensorflow as tf print('''TensorFlow version:''', tf.__version__) print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU'''))) print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU'''))) except ImportError: print('''TensorFlow version:''', None)
54
'''simple docstring''' import os import sys import unittest lowercase : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) lowercase : Any = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py') lowercase : Optional[int] = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py') class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Any = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : List[Any] = {'''BertModelTest''': '''BertModelTester'''} A : int = { '''BlipModelTest''': '''BlipModelTester''', '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''', '''BlipTextModelTest''': '''BlipTextModelTester''', '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''', '''BlipVQAModelTest''': '''BlipVQAModelTester''', '''BlipVisionModelTest''': '''BlipVisionModelTester''', } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : List[str] = { '''BertForMaskedLM''': ['''BertModelTest'''], '''BertForMultipleChoice''': ['''BertModelTest'''], '''BertForNextSentencePrediction''': ['''BertModelTest'''], '''BertForPreTraining''': ['''BertModelTest'''], '''BertForQuestionAnswering''': ['''BertModelTest'''], '''BertForSequenceClassification''': ['''BertModelTest'''], '''BertForTokenClassification''': ['''BertModelTest'''], '''BertLMHeadModel''': ['''BertModelTest'''], '''BertModel''': ['''BertModelTest'''], } A : Union[str, Any] = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''], '''BlipModel''': ['''BlipModelTest'''], '''BlipTextModel''': ['''BlipTextModelTest'''], '''BlipVisionModel''': ['''BlipVisionModelTest'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : int = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Dict = { '''BertForMaskedLM''': ['''BertModelTester'''], '''BertForMultipleChoice''': ['''BertModelTester'''], '''BertForNextSentencePrediction''': ['''BertModelTester'''], '''BertForPreTraining''': ['''BertModelTester'''], '''BertForQuestionAnswering''': ['''BertModelTester'''], '''BertForSequenceClassification''': ['''BertModelTester'''], '''BertForTokenClassification''': ['''BertModelTester'''], '''BertLMHeadModel''': ['''BertModelTester'''], '''BertModel''': ['''BertModelTester'''], } A : str = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''], '''BlipModel''': ['''BlipModelTester'''], '''BlipTextModel''': ['''BlipTextModelTester'''], '''BlipVisionModel''': ['''BlipVisionModelTester'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
3
0
'''simple docstring''' from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def __snake_case ( UpperCAmelCase_ : List[str] ): return getitem, k def __snake_case ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str ): return setitem, k, v def __snake_case ( UpperCAmelCase_ : Optional[Any] ): return delitem, k def __snake_case ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , *UpperCAmelCase_ : Optional[int] ): try: return fun(UpperCAmelCase_ , *UpperCAmelCase_ ), None except Exception as e: return None, e a_ : List[str] = ( _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), ) a_ : int = [ _set("""key_a""", """val_a"""), _set("""key_a""", """val_b"""), ] a_ : Tuple = [ _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), _del("""key_a"""), _del("""key_b"""), _set("""key_a""", """val_a"""), _del("""key_a"""), ] a_ : Optional[Any] = [ _get("""key_a"""), _del("""key_a"""), _set("""key_a""", """val_a"""), _del("""key_a"""), _del("""key_a"""), _get("""key_a"""), ] a_ : int = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] a_ : List[Any] = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set("""key_a""", """val_b"""), ] @pytest.mark.parametrize( "operations" , ( pytest.param(_add_items , id="add items" ), pytest.param(_overwrite_items , id="overwrite items" ), pytest.param(_delete_items , id="delete items" ), pytest.param(_access_absent_items , id="access absent items" ), pytest.param(_add_with_resize_up , id="add with resize up" ), pytest.param(_add_with_resize_down , id="add with resize down" ), ) , ) def __snake_case ( UpperCAmelCase_ : int ): lowerCamelCase_ = HashMap(initial_block_size=4 ) lowerCamelCase_ = {} for _, (fun, *args) in enumerate(UpperCAmelCase_ ): lowerCamelCase_ ,lowerCamelCase_ = _run_operation(UpperCAmelCase_ , UpperCAmelCase_ , *UpperCAmelCase_ ) lowerCamelCase_ ,lowerCamelCase_ = _run_operation(UpperCAmelCase_ , UpperCAmelCase_ , *UpperCAmelCase_ ) assert my_res == py_res assert str(UpperCAmelCase_ ) == str(UpperCAmelCase_ ) assert set(UpperCAmelCase_ ) == set(UpperCAmelCase_ ) assert len(UpperCAmelCase_ ) == len(UpperCAmelCase_ ) assert set(my.items() ) == set(py.items() ) def __snake_case ( ): def is_public(UpperCAmelCase_ : str ) -> bool: return not name.startswith("_" ) lowerCamelCase_ = {name for name in dir({} ) if is_public(UpperCAmelCase_ )} lowerCamelCase_ = {name for name in dir(HashMap() ) if is_public(UpperCAmelCase_ )} assert dict_public_names > hash_public_names
55
'''simple docstring''' from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class A ( __snake_case ): __magic_name__ = DistilBertTokenizer __magic_name__ = DistilBertTokenizerFast __magic_name__ = True @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : List[Any] = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' ) A : Dict = tokenizer.encode('''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE ) A : List[str] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE ) A : Optional[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE ) A : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
3
0
'''simple docstring''' import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class a ( unittest.TestCase ): def A_ ( self : str , lowercase_ : List[str] ): snake_case_ = 3 snake_case_ = 250 snake_case_ = ids_tensor((batch_size, length) , lowercase_ ) snake_case_ = torch.ones((batch_size, length) , device=lowercase_ , dtype=torch.float ) / length return input_ids, scores def A_ ( self : List[Any] ): snake_case_ ,snake_case_ = self._get_tensors(5 ) snake_case_ = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(lowercase_ , lowercase_ ) ) snake_case_ ,snake_case_ = self._get_tensors(9 ) self.assertFalse(criteria(lowercase_ , lowercase_ ) ) snake_case_ ,snake_case_ = self._get_tensors(10 ) self.assertTrue(criteria(lowercase_ , lowercase_ ) ) def A_ ( self : str ): snake_case_ = MaxLengthCriteria(max_length=10 ) snake_case_ ,snake_case_ = self._get_tensors(5 ) self.assertFalse(criteria(lowercase_ , lowercase_ ) ) snake_case_ ,snake_case_ = self._get_tensors(9 ) self.assertFalse(criteria(lowercase_ , lowercase_ ) ) snake_case_ ,snake_case_ = self._get_tensors(10 ) self.assertTrue(criteria(lowercase_ , lowercase_ ) ) def A_ ( self : Optional[Any] ): snake_case_ = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) snake_case_ ,snake_case_ = self._get_tensors(5 ) self.assertFalse(criteria(lowercase_ , lowercase_ ) ) snake_case_ ,snake_case_ = self._get_tensors(9 ) self.assertFalse(criteria(lowercase_ , lowercase_ ) ) snake_case_ ,snake_case_ = self._get_tensors(10 ) self.assertTrue(criteria(lowercase_ , lowercase_ ) ) snake_case_ = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 10 ) def A_ ( self : List[Any] ): snake_case_ ,snake_case_ = self._get_tensors(5 ) snake_case_ = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(lowercase_ , lowercase_ ) ) snake_case_ = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(lowercase_ , lowercase_ ) ) def A_ ( self : Dict ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 ) with self.assertWarns(lowercase_ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 ) snake_case_ = validate_stopping_criteria(StoppingCriteriaList() , 11 ) self.assertEqual(len(lowercase_ ) , 1 )
56
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase : Optional[int] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''input_features''', '''attention_mask'''] def __init__( self , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=16000 , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" super().__init__(feature_size=SCREAMING_SNAKE_CASE , sampling_rate=SCREAMING_SNAKE_CASE , padding_value=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = num_mel_bins A : Tuple = do_ceptral_normalize A : Dict = normalize_means A : List[Any] = normalize_vars A : List[str] = True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : List[Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers A : Any = torch.from_numpy(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) A : Any = ta_kaldi.fbank(SCREAMING_SNAKE_CASE , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 0.0 , ) -> np.ndarray: """simple docstring""" if normalize_means: A : Dict = x[:input_length].mean(axis=0 ) A : Optional[Any] = np.subtract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if normalize_vars: A : str = x[:input_length].std(axis=0 ) A : int = np.divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if input_length < x.shape[0]: A : List[str] = padding_value # make sure array is in float32 A : Tuple = x.astype(np.floataa ) return x def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[np.ndarray]: """simple docstring""" A : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' F' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) A : List[Any] = isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}' ) A : Tuple = is_batched_numpy or ( isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ): A : Union[str, Any] = np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A : Any = [raw_speech] # extract fbank features A : List[str] = [self._extract_fbank_features(SCREAMING_SNAKE_CASE ) for waveform in raw_speech] # convert into correct format for padding A : str = BatchFeature({'''input_features''': features} ) A : Union[str, Any] = self.pad( SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) # make sure list is in array format A : List[str] = padded_inputs.get('''input_features''' ) if isinstance(input_features[0] , SCREAMING_SNAKE_CASE ): A : str = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features] A : Union[str, Any] = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: A : Dict = ( np.array(SCREAMING_SNAKE_CASE , dtype=np.intaa ) if self._get_padding_strategies(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE ) is not PaddingStrategy.DO_NOT_PAD else None ) A : List[Any] = self.normalize( padded_inputs['''input_features'''] , attention_mask=SCREAMING_SNAKE_CASE ) if return_tensors is not None: A : int = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE ) return padded_inputs
3
0
"""simple docstring""" import heapq as hq import math from collections.abc import Iterator class _UpperCamelCase : '''simple docstring''' def __init__( self , __a ): __lowerCAmelCase = str(id_ ) __lowerCAmelCase = None __lowerCAmelCase = None __lowerCAmelCase = [] __lowerCAmelCase = {} # {vertex:distance} def __lt__( self , __a ): return self.key < other.key def __repr__( self ): return self.id def snake_case ( self , __a ): self.neighbors.append(__a ) def snake_case ( self , __a , __a ): __lowerCAmelCase = weight def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' graph[a - 1].add_neighbor(graph[b - 1] ) graph[b - 1].add_neighbor(graph[a - 1] ) # add the edges: graph[a - 1].add_edge(graph[b - 1] , _UpperCamelCase ) graph[b - 1].add_edge(graph[a - 1] , _UpperCamelCase ) def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = [] for u in graph: __lowerCAmelCase = math.inf __lowerCAmelCase = None __lowerCAmelCase = 0 __lowerCAmelCase = graph[:] while q: __lowerCAmelCase = min(_UpperCamelCase ) q.remove(_UpperCamelCase ) for v in u.neighbors: if (v in q) and (u.edges[v.id] < v.key): __lowerCAmelCase = u __lowerCAmelCase = u.edges[v.id] for i in range(1 , len(_UpperCamelCase ) ): a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) ) return a def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' for u in graph: __lowerCAmelCase = math.inf __lowerCAmelCase = None __lowerCAmelCase = 0 __lowerCAmelCase = list(_UpperCamelCase ) hq.heapify(_UpperCamelCase ) while h: __lowerCAmelCase = hq.heappop(_UpperCamelCase ) for v in u.neighbors: if (v in h) and (u.edges[v.id] < v.key): __lowerCAmelCase = u __lowerCAmelCase = u.edges[v.id] hq.heapify(_UpperCamelCase ) for i in range(1 , len(_UpperCamelCase ) ): yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) def _lowerCamelCase ( ): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
57
'''simple docstring''' import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 lowercase : str = get_tests_dir('fixtures/dummy_feature_extractor_config.json') lowercase : str = get_tests_dir('fixtures/vocab.json') lowercase : int = get_tests_dir('fixtures') class A ( unittest.TestCase ): __magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = 0 def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : List[Any] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : Union[str, Any] = WavaVecaConfig() A : List[str] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) # save in new folder model_config.save_pretrained(SCREAMING_SNAKE_CASE ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) ) A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : Dict = WavaVecaFeatureExtractor() A : List[str] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(SCREAMING_SNAKE_CASE ) # drop `processor_class` in tokenizer with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f: A : Dict = json.load(SCREAMING_SNAKE_CASE ) config_dict.pop('''processor_class''' ) with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE ) ) A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : List[Any] = WavaVecaFeatureExtractor() A : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(SCREAMING_SNAKE_CASE ) # drop `processor_class` in feature extractor with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f: A : str = json.load(SCREAMING_SNAKE_CASE ) config_dict.pop('''processor_class''' ) with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE ) ) A : str = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : str = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' ) model_config.save_pretrained(SCREAMING_SNAKE_CASE ) # copy relevant files copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) ) # create emtpy sample processor with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write('''{}''' ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" with self.assertRaises(SCREAMING_SNAKE_CASE ): A : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(SCREAMING_SNAKE_CASE ): A : Union[str, Any] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) A : List[str] = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) A : Tuple = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version A : List[str] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE ) A : int = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(SCREAMING_SNAKE_CASE ): AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Now that the config is registered, it can be used as any other config with the auto-API A : List[Any] = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: A : Tuple = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : Optional[int] = CustomTokenizer(SCREAMING_SNAKE_CASE ) A : Any = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" class A ( __snake_case ): __magic_name__ = False class A ( __snake_case ): __magic_name__ = False class A ( __snake_case ): __magic_name__ = '''AutoFeatureExtractor''' __magic_name__ = '''AutoTokenizer''' __magic_name__ = False try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # If remote code is not set, the default is to use local classes. A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. A : Optional[int] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. A : Tuple = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : int = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Optional[int] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' ) self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' ) @is_staging_test class A ( unittest.TestCase ): __magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def __lowerCAmelCase ( cls ) -> Dict: """simple docstring""" A : Optional[int] = TOKEN HfFolder.save_token(SCREAMING_SNAKE_CASE ) @classmethod def __lowerCAmelCase ( cls ) -> Any: """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' ) except HTTPError: pass def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Union[str, Any] = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(SCREAMING_SNAKE_CASE , '''test-processor''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token ) A : int = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Tuple = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(SCREAMING_SNAKE_CASE , '''test-processor-org''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token , organization='''valid_org''' , ) A : int = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() A : Any = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: A : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : str = CustomTokenizer(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(F'{USER}/test-dynamic-processor' , token=self._token ) A : List[str] = Repository(SCREAMING_SNAKE_CASE , clone_from=F'{USER}/test-dynamic-processor' , token=self._token ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { '''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''', '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) ) as f: A : Dict = json.load(SCREAMING_SNAKE_CASE ) self.assertDictEqual( tokenizer_config['''auto_map'''] , { '''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None], '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_feature_extraction.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_tokenization.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_processing.py''' ) ) ) repo.push_to_hub() A : Optional[int] = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' , trust_remote_code=SCREAMING_SNAKE_CASE ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
3
0
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: lowercase_ = None lowercase_ = logging.get_logger(__name__) lowercase_ = """▁""" lowercase_ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} lowercase_ = { """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}, """tokenizer_file""": { """google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json""" }, } lowercase_ = { """google/pegasus-xsum""": 512, } class a_ ( snake_case_ ): '''simple docstring''' UpperCamelCase = VOCAB_FILES_NAMES UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase = PegasusTokenizer UpperCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self , A=None , A=None , A="<pad>" , A="</s>" , A="<unk>" , A="<mask_2>" , A="<mask_1>" , A=None , A=103 , **A , ) -> List[Any]: _SCREAMING_SNAKE_CASE = offset if additional_special_tokens is not None: if not isinstance(A , A ): raise TypeError( f'additional_special_tokens should be of type {type(A )}, but is' f' {type(A )}' ) _SCREAMING_SNAKE_CASE = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'<unk_{i}>' for i in range(len(A ) , self.offset - 1 ) ] if len(set(A ) ) != len(A ): raise ValueError( """Please make sure that the provided additional_special_tokens do not contain an incorrectly""" f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' ) _SCREAMING_SNAKE_CASE = additional_special_tokens_extended else: _SCREAMING_SNAKE_CASE = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'<unk_{i}>' for i in range(2 , self.offset )] super().__init__( A , tokenizer_file=A , pad_token=A , eos_token=A , unk_token=A , mask_token=A , mask_token_sent=A , offset=A , additional_special_tokens=A , **A , ) _SCREAMING_SNAKE_CASE = vocab_file _SCREAMING_SNAKE_CASE = False if not self.vocab_file else True def snake_case_( self , A ) -> Any: _SCREAMING_SNAKE_CASE = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( """There should be 3 special tokens: mask_token, pad_token, and eos_token +""" f' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}' ) return [1 if x in all_special_ids else 0 for x in seq] def snake_case_( self , A , A = None , A = False ) -> List[int]: if already_has_special_tokens: return self._special_token_mask(A ) elif token_ids_a is None: return self._special_token_mask(A ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def snake_case_( self , A , A=None ) -> List[int]: if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def snake_case_( self , A , A = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(A ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return _SCREAMING_SNAKE_CASE = os.path.join( A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ): copyfile(self.vocab_file , A ) return (out_vocab_file,)
58
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: lowercase : Optional[Any] = None lowercase : Tuple = logging.get_logger(__name__) lowercase : Dict = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'} lowercase : Tuple = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, 'tokenizer_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json', }, } lowercase : List[str] = { 'google/rembert': 2_56, } lowercase : Dict = '▁' class A ( __snake_case ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = RemBertTokenizer def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[MASK]" , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" A : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token super().__init__( SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) A : List[Any] = do_lower_case A : str = remove_space A : int = keep_accents A : Union[str, Any] = vocab_file A : List[Any] = False if not self.vocab_file else True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : List[Any] = [self.sep_token_id] A : Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : Tuple = [self.sep_token_id] A : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error('''Vocabulary path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE ) ) return A : Any = os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
3
0
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class UpperCAmelCase ( A_ ): A__ : Optional[Any] = ["image_processor", "tokenizer"] A__ : Tuple = "AutoImageProcessor" A__ : Optional[int] = "AutoTokenizer" def __init__(self : str , snake_case__ : List[str] , snake_case__ : int ) -> Tuple: '''simple docstring''' super().__init__(snake_case__ , snake_case__ ) snake_case : int = self.image_processor def __call__(self : Optional[Any] , snake_case__ : Dict=None , snake_case__ : Tuple=None , snake_case__ : Dict=None , **snake_case__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none." ) if text is not None: snake_case : Tuple = self.tokenizer(snake_case__ , return_tensors=snake_case__ , **snake_case__ ) if images is not None: snake_case : List[str] = self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ ) if text is not None and images is not None: snake_case : Tuple = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ ) def _SCREAMING_SNAKE_CASE (self : Dict , *snake_case__ : Any , **snake_case__ : str ) -> Tuple: '''simple docstring''' return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def _SCREAMING_SNAKE_CASE (self : Dict , *snake_case__ : Union[str, Any] , **snake_case__ : int ) -> Tuple: '''simple docstring''' return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @property def _SCREAMING_SNAKE_CASE (self : int ) -> Any: '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
59
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowercase : Optional[Any] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''pixel_values'''] def __init__( self , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 1 / 255 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) A : str = size if size is not None else {'''shortest_edge''': 384} A : Tuple = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : str = do_resize A : List[Any] = size # Default value set here for backwards compatibility where the value in config is None A : List[Any] = crop_pct if crop_pct is not None else 224 / 256 A : Optional[int] = resample A : Union[str, Any] = do_rescale A : List[str] = rescale_factor A : Union[str, Any] = do_normalize A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : str = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) if "shortest_edge" not in size: raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' ) A : Any = size['''shortest_edge'''] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct A : Dict = int(shortest_edge / crop_pct ) A : str = get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : int = resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) else: # warping (no cropping) when evaluated at 384 or larger return resize( SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE , ) -> PIL.Image.Image: """simple docstring""" A : int = do_resize if do_resize is not None else self.do_resize A : Tuple = crop_pct if crop_pct is not None else self.crop_pct A : Optional[Any] = resample if resample is not None else self.resample A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale A : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor A : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean A : List[str] = image_std if image_std is not None else self.image_std A : Union[str, Any] = size if size is not None else self.size A : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : Any = make_list_of_images(SCREAMING_SNAKE_CASE ) if not valid_images(SCREAMING_SNAKE_CASE ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError('''crop_pct must be specified if size < 384.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. A : Optional[int] = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images] if do_resize: A : Any = [self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , crop_pct=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: A : str = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images] if do_normalize: A : Dict = [self.normalize(image=SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE ) for image in images] A : Any = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images] A : Optional[int] = {'''pixel_values''': images} return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
3
0
"""simple docstring""" from __future__ import annotations from decimal import Decimal from numpy import array def _snake_case ( _snake_case : list[list[float]] ): lowerCAmelCase : str = Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(_snake_case ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2: # Calculate the determinant of the matrix lowerCAmelCase : int = float( d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) ) if determinant == 0: raise ValueError('''This matrix has no inverse.''' ) # Creates a copy of the matrix with swapped positions of the elements lowerCAmelCase : Optional[int] = [[0.0, 0.0], [0.0, 0.0]] lowerCAmelCase, lowerCAmelCase : List[Any] = matrix[1][1], matrix[0][0] lowerCAmelCase, lowerCAmelCase : Union[str, Any] = -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(_snake_case ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(_snake_case ) == 3 and len(matrix[0] ) == 3 and len(matrix[1] ) == 3 and len(matrix[2] ) == 3 ): # Calculate the determinant of the matrix using Sarrus rule lowerCAmelCase : int = float( ( (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] )) + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] )) + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] )) ) - ( (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] )) + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] )) + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] )) ) ) if determinant == 0: raise ValueError('''This matrix has no inverse.''' ) # Creating cofactor matrix lowerCAmelCase : Dict = [ [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], ] lowerCAmelCase : List[str] = (d(matrix[1][1] ) * d(matrix[2][2] )) - ( d(matrix[1][2] ) * d(matrix[2][1] ) ) lowerCAmelCase : Dict = -( (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] )) ) lowerCAmelCase : str = (d(matrix[1][0] ) * d(matrix[2][1] )) - ( d(matrix[1][1] ) * d(matrix[2][0] ) ) lowerCAmelCase : Any = -( (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] )) ) lowerCAmelCase : Any = (d(matrix[0][0] ) * d(matrix[2][2] )) - ( d(matrix[0][2] ) * d(matrix[2][0] ) ) lowerCAmelCase : Optional[int] = -( (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] )) ) lowerCAmelCase : Optional[int] = (d(matrix[0][1] ) * d(matrix[1][2] )) - ( d(matrix[0][2] ) * d(matrix[1][1] ) ) lowerCAmelCase : Dict = -( (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] )) ) lowerCAmelCase : List[Any] = (d(matrix[0][0] ) * d(matrix[1][1] )) - ( d(matrix[0][1] ) * d(matrix[1][0] ) ) # Transpose the cofactor matrix (Adjoint matrix) lowerCAmelCase : str = array(_snake_case ) for i in range(3 ): for j in range(3 ): lowerCAmelCase : Optional[Any] = cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix lowerCAmelCase : Tuple = array(_snake_case ) for i in range(3 ): for j in range(3 ): inverse_matrix[i][j] /= d(_snake_case ) # Calculate the inverse of the matrix return [[float(d(_snake_case ) ) or 0.0 for n in row] for row in inverse_matrix] raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''' )
60
'''simple docstring''' import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(SCREAMING_SNAKE_CASE ): A : int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[str] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> int: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: with self.subTest(SCREAMING_SNAKE_CASE ): A : Any = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Any = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: A : Optional[int] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = FlaxBertModel.from_pretrained(SCREAMING_SNAKE_CASE ) A : Optional[Any] = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() @slow def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: A : List[str] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE ) A : int = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''bert-base is not a local folder and is not a valid model identifier''' ): A : List[Any] = FlaxAutoModel.from_pretrained('''bert-base''' ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): A : Optional[int] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE , revision='''aaaaaa''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ): A : List[str] = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" with self.assertRaisesRegex(SCREAMING_SNAKE_CASE , '''Use `from_pt=True` to load this model''' ): A : Any = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
3
0