code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
from typing import Optional from urllib.parse import quote import huggingface_hub as hfh from packaging import version def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None ) -> str: """simple docstring""" if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release: # old versions of hfh don't url-encode the file path snake_case_ : Union[str, Any] = quote(_UpperCamelCase ) return hfh.hf_hub_url(_UpperCamelCase , _UpperCamelCase , repo_type='''dataset''' , revision=_UpperCamelCase )
279
'''simple docstring''' import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowerCamelCase : List[str] = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class __lowerCAmelCase (lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : int = DebertaVaTokenizer lowerCAmelCase__ : List[Any] = DebertaVaTokenizerFast lowerCAmelCase__ : str = True lowerCAmelCase__ : Tuple = True def UpperCamelCase__ (self : Tuple ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowercase__ = DebertaVaTokenizer(UpperCamelCase , unk_token='''<unk>''' ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' lowercase__ = '''this is a test''' lowercase__ = '''this is a test''' return input_text, output_text def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' lowercase__ = '''<pad>''' lowercase__ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase ) def UpperCamelCase__ (self : Dict ): '''simple docstring''' lowercase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''[PAD]''' ) self.assertEqual(len(UpperCamelCase ) , 30001 ) def UpperCamelCase__ (self : int ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = ''' \tHeLLo!how \n Are yoU? ''' lowercase__ = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?'''] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' pass @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' pass def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = ''' \tHeLLo!how \n Are yoU? ''' lowercase__ = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?'''] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.get_tokenizer() lowercase__ = self.get_rust_tokenizer() lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = self.get_rust_tokenizer() lowercase__ = tokenizer.encode(UpperCamelCase ) lowercase__ = rust_tokenizer.encode(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = '''This is a test''' lowercase__ = [13, 1, 4398, 25, 21, 1289] lowercase__ = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test'''] lowercase__ = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test'''] lowercase__ = DebertaVaTokenizer(UpperCamelCase , keep_accents=UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , keep_accents=UpperCamelCase ) lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) # fmt: off lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] lowercase__ = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ] lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = DebertaVaTokenizer(UpperCamelCase ) lowercase__ = tokenizer.encode('''sequence builders''' ) lowercase__ = tokenizer.encode('''multi-sequence build''' ) lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase ) lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , UpperCamelCase ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , UpperCamelCase , ) @slow def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
2
0
'''simple docstring''' def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> List[str]: _enforce_args(UpperCamelCase , UpperCamelCase ) if n == 0: return 0 lowerCamelCase__ : List[str] = float("""-inf""" ) for i in range(1 , n + 1 ): lowerCamelCase__ : List[Any] = max( UpperCamelCase , prices[i - 1] + naive_cut_rod_recursive(n - i , UpperCamelCase ) ) return max_revue def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Dict: _enforce_args(UpperCamelCase , UpperCamelCase ) lowerCamelCase__ : Tuple = [float("""-inf""" ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]: if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: lowerCamelCase__ : int = float("""-inf""" ) for i in range(1 , n + 1 ): lowerCamelCase__ : List[Any] = max( UpperCamelCase , prices[i - 1] + _top_down_cut_rod_recursive(n - i , UpperCamelCase , UpperCamelCase ) , ) lowerCamelCase__ : List[str] = max_revenue return max_rev[n] def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Any: _enforce_args(UpperCamelCase , UpperCamelCase ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. lowerCamelCase__ : str = [float("""-inf""" ) for _ in range(n + 1 )] lowerCamelCase__ : str = 0 for i in range(1 , n + 1 ): lowerCamelCase__ : str = max_rev[i] for j in range(1 , i + 1 ): lowerCamelCase__ : Optional[int] = max(UpperCamelCase , prices[j - 1] + max_rev[i - j] ) lowerCamelCase__ : List[str] = max_revenue_i return max_rev[n] def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Union[str, Any]: if n < 0: lowerCamelCase__ : List[str] = f'''n must be greater than or equal to 0. Got n = {n}''' raise ValueError(UpperCamelCase ) if n > len(UpperCamelCase ): lowerCamelCase__ : int = ( """Each integral piece of rod must have a corresponding price. """ f'''Got n = {n} but length of prices = {len(UpperCamelCase )}''' ) raise ValueError(UpperCamelCase ) def SCREAMING_SNAKE_CASE_ () -> Any: lowerCamelCase__ : int = [6, 10, 12, 15, 20, 23] lowerCamelCase__ : Dict = len(UpperCamelCase ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. lowerCamelCase__ : Optional[int] = 36 lowerCamelCase__ : Union[str, Any] = top_down_cut_rod(UpperCamelCase , UpperCamelCase ) lowerCamelCase__ : Tuple = bottom_up_cut_rod(UpperCamelCase , UpperCamelCase ) lowerCamelCase__ : Optional[int] = naive_cut_rod_recursive(UpperCamelCase , UpperCamelCase ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
41
'''simple docstring''' import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def _SCREAMING_SNAKE_CASE (A ) -> Optional[Any]: """simple docstring""" lowercase__ = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', '''decoder.output_projection.weight''', ] for k in ignore_keys: state_dict.pop(A , A ) def _SCREAMING_SNAKE_CASE (A ) -> List[str]: """simple docstring""" lowercase__ ,lowercase__ = emb.weight.shape lowercase__ = nn.Linear(A , A , bias=A ) lowercase__ = emb.weight.data return lin_layer def _SCREAMING_SNAKE_CASE (A , A="facebook/mbart-large-en-ro" , A=False , A=False ) -> Union[str, Any]: """simple docstring""" lowercase__ = torch.load(A , map_location='''cpu''' )['''model'''] remove_ignore_keys_(A ) lowercase__ = state_dict['''encoder.embed_tokens.weight'''].shape[0] lowercase__ = MBartConfig.from_pretrained(A , vocab_size=A ) if mbart_aa and finetuned: lowercase__ = '''relu''' lowercase__ = state_dict['''decoder.embed_tokens.weight'''] lowercase__ = MBartForConditionalGeneration(A ) model.model.load_state_dict(A ) if finetuned: lowercase__ = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": lowerCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( 'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.' ) parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--hf_config', default='facebook/mbart-large-cc25', type=str, help='Which huggingface architecture to use: mbart-large', ) parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint') parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint') lowerCamelCase : Any = parser.parse_args() lowerCamelCase : List[str] = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
2
0
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: lowercase : List[str] = None lowercase : Dict = logging.get_logger(__name__) lowercase : List[str] = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} lowercase : Tuple = { 'vocab_file': { 'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model', 'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model', 'moussaKam/barthez-orangesum-title': ( 'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json', 'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json', 'moussaKam/barthez-orangesum-title': ( 'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json' ), }, } lowercase : Optional[int] = { 'moussaKam/mbarthez': 10_24, 'moussaKam/barthez': 10_24, 'moussaKam/barthez-orangesum-title': 10_24, } lowercase : Tuple = '▁' class A ( lowercase_ ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ["""input_ids""", """attention_mask"""] __magic_name__ = BarthezTokenizer def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="<s>" , SCREAMING_SNAKE_CASE="</s>" , SCREAMING_SNAKE_CASE="</s>" , SCREAMING_SNAKE_CASE="<s>" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="<mask>" , **SCREAMING_SNAKE_CASE , ) -> Dict: """simple docstring""" A : str = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token super().__init__( SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) A : int = vocab_file A : Any = False if not self.vocab_file else True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A : Any = [self.cls_token_id] A : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Optional[Any]: """simple docstring""" A : int = [self.sep_token_id] A : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Optional[int]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A : int = os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
3
'''simple docstring''' import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask lowerCamelCase : List[Any] = logging.getLogger(__name__) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : Optional[Any] , UpperCamelCase : Any=-1 ): '''simple docstring''' lowercase__ = label_idx def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[Split, str] ): '''simple docstring''' if isinstance(UpperCamelCase , UpperCamelCase ): lowercase__ = mode.value lowercase__ = os.path.join(UpperCamelCase , f"{mode}.txt" ) lowercase__ = 1 lowercase__ = [] with open(UpperCamelCase , encoding='''utf-8''' ) as f: lowercase__ = [] lowercase__ = [] for line in f: if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) ) guid_index += 1 lowercase__ = [] lowercase__ = [] else: lowercase__ = line.split(''' ''' ) words.append(splits[0] ) if len(UpperCamelCase ) > 1: labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) ) else: # Examples could have no label for mode = "test" labels.append('''O''' ) if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) ) return examples def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : TextIO , UpperCamelCase : TextIO , UpperCamelCase : List ): '''simple docstring''' lowercase__ = 0 for line in test_input_reader: if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n": writer.write(UpperCamelCase ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: lowercase__ = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n''' writer.write(UpperCamelCase ) else: logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] ) def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' if path: with open(UpperCamelCase , '''r''' ) as f: lowercase__ = f.read().splitlines() if "O" not in labels: lowercase__ = ['''O'''] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : List[Any] ): '''simple docstring''' super().__init__(label_idx=-2 ) def UpperCamelCase__ (self : List[Any] , UpperCamelCase : str ): '''simple docstring''' if path: with open(UpperCamelCase , '''r''' ) as f: lowercase__ = f.read().splitlines() if "O" not in labels: lowercase__ = ['''O'''] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def UpperCamelCase__ (self : Tuple , UpperCamelCase : int , UpperCamelCase : Union[Split, str] ): '''simple docstring''' if isinstance(UpperCamelCase , UpperCamelCase ): lowercase__ = mode.value lowercase__ = os.path.join(UpperCamelCase , f"{mode}.txt" ) lowercase__ = 1 lowercase__ = [] with open(UpperCamelCase , encoding='''utf-8''' ) as f: for sentence in parse_incr(UpperCamelCase ): lowercase__ = [] lowercase__ = [] for token in sentence: words.append(token['''form'''] ) labels.append(token['''upos'''] ) assert len(UpperCamelCase ) == len(UpperCamelCase ) if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) ) guid_index += 1 return examples def UpperCamelCase__ (self : Tuple , UpperCamelCase : TextIO , UpperCamelCase : TextIO , UpperCamelCase : List ): '''simple docstring''' lowercase__ = 0 for sentence in parse_incr(UpperCamelCase ): lowercase__ = preds_list[example_id] lowercase__ = '''''' for token in sentence: out += f"{token['form']} ({token['upos']}|{s_p.pop(0 )}) " out += "\n" writer.write(UpperCamelCase ) example_id += 1 def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' if path: with open(UpperCamelCase , '''r''' ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
2
0
'''simple docstring''' import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters A =logging.get_logger(__name__) def snake_case_ (_a : Dict , _a : Optional[int] , _a : Optional[int] , _a : int=None , _a : str=None ): if "." in tensor_name: UpperCAmelCase = tensor_name.split('''.''' ) for split in splits[:-1]: UpperCAmelCase = getattr(_a , _a ) if new_module is None: raise ValueError(F"{module} has no attribute {split}." ) UpperCAmelCase = new_module UpperCAmelCase = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(F"{module} does not have a parameter or a buffer named {tensor_name}." ) UpperCAmelCase = tensor_name in module._buffers UpperCAmelCase = getattr(_a , _a ) if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None: raise ValueError(F"{tensor_name} is on the meta device, we need a `value` to put in on {device}." ) UpperCAmelCase = False UpperCAmelCase = False if is_buffer or not is_bitsandbytes_available(): UpperCAmelCase = False UpperCAmelCase = False else: UpperCAmelCase = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) UpperCAmelCase = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: UpperCAmelCase = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: UpperCAmelCase = old_value.to(_a ) elif isinstance(_a , torch.Tensor ): UpperCAmelCase = value.to('''cpu''' ) if value.dtype == torch.inta: UpperCAmelCase = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse( '''0.37.2''' ) if not is_abit_serializable: raise ValueError( '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ''' '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' ) else: UpperCAmelCase = torch.tensor(_a , device='''cpu''' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. if issubclass(module.source_cls , _a ) and fpaa_statistics is None: UpperCAmelCase = new_value.T UpperCAmelCase = old_value.__dict__ if is_abit: UpperCAmelCase = bnb.nn.IntaParams(_a , requires_grad=_a , **_a ).to(_a ) elif is_abit: UpperCAmelCase = bnb.nn.Paramsabit(_a , requires_grad=_a , **_a ).to(_a ) UpperCAmelCase = new_value if fpaa_statistics is not None: setattr(module.weight , '''SCB''' , fpaa_statistics.to(_a ) ) else: if value is None: UpperCAmelCase = old_value.to(_a ) elif isinstance(_a , torch.Tensor ): UpperCAmelCase = value.to(_a ) else: UpperCAmelCase = torch.tensor(_a , device=_a ) if is_buffer: UpperCAmelCase = new_value else: UpperCAmelCase = nn.Parameter(_a , requires_grad=old_value.requires_grad ) UpperCAmelCase = new_value def snake_case_ (_a : Tuple , _a : Any=None , _a : str=None , _a : List[Any]=None , _a : Tuple=False ): for name, module in model.named_children(): if current_key_name is None: UpperCAmelCase = [] current_key_name.append(_a ) if (isinstance(_a , nn.Linear ) or isinstance(_a , _a )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '''.'''.join(_a ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(_a , _a ): UpperCAmelCase , UpperCAmelCase = module.weight.shape else: UpperCAmelCase = module.in_features UpperCAmelCase = module.out_features if quantization_config.quantization_method() == "llm_int8": UpperCAmelCase = bnb.nn.LinearabitLt( _a , _a , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) UpperCAmelCase = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: UpperCAmelCase = bnb.nn.Linearabit( _a , _a , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) UpperCAmelCase = True # Store the module class in case we need to transpose the weight later UpperCAmelCase = type(_a ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(_a ) if len(list(module.children() ) ) > 0: UpperCAmelCase , UpperCAmelCase = _replace_with_bnb_linear( _a , _a , _a , _a , has_been_replaced=_a , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def snake_case_ (_a : Dict , _a : Any=None , _a : Tuple=None , _a : List[Any]=None ): UpperCAmelCase = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert UpperCAmelCase , UpperCAmelCase = _replace_with_bnb_linear( _a , _a , _a , _a ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def snake_case_ (*_a : List[Any] , **_a : List[str] ): warnings.warn( '''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , _a , ) return replace_with_bnb_linear(*_a , **_a ) def snake_case_ (*_a : Union[str, Any] , **_a : Union[str, Any] ): warnings.warn( '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , _a , ) return set_module_quantized_tensor_to_device(*_a , **_a ) def snake_case_ (_a : List[str] ): UpperCAmelCase = deepcopy(_a ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() UpperCAmelCase = find_tied_parameters(_a ) # For compatibility with Accelerate < 0.18 if isinstance(_a , _a ): UpperCAmelCase = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: UpperCAmelCase = sum(_a , [] ) UpperCAmelCase = len(_a ) > 0 # Check if it is a base model UpperCAmelCase = not hasattr(_a , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head UpperCAmelCase = list(model.named_children() ) UpperCAmelCase = [list_modules[-1][0]] # add last module together with tied weights UpperCAmelCase = set(_a ) - set(_a ) UpperCAmelCase = list(set(_a ) ) + list(_a ) # remove ".weight" from the keys UpperCAmelCase = ['''.weight''', '''.bias'''] UpperCAmelCase = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: UpperCAmelCase = name.replace(_a , '''''' ) filtered_module_names.append(_a ) return filtered_module_names
34
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : Dict = logging.get_logger(__name__) lowerCamelCase : Union[str, Any] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : List[str] = """megatron-bert""" def __init__(self : Tuple , UpperCamelCase : Optional[int]=29056 , UpperCamelCase : Optional[Any]=1024 , UpperCamelCase : Any=24 , UpperCamelCase : int=16 , UpperCamelCase : Optional[int]=4096 , UpperCamelCase : int="gelu" , UpperCamelCase : int=0.1 , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Any=512 , UpperCamelCase : int=2 , UpperCamelCase : Dict=0.02 , UpperCamelCase : Dict=1E-12 , UpperCamelCase : List[Any]=0 , UpperCamelCase : Optional[int]="absolute" , UpperCamelCase : List[Any]=True , **UpperCamelCase : str , ): '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase , **UpperCamelCase ) lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = hidden_act lowercase__ = intermediate_size lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = position_embedding_type lowercase__ = use_cache
2
0
'''simple docstring''' import argparse import hashlib # hashlib is only used inside the Test class import struct class lowerCAmelCase : def __init__( self : Any , __lowercase : Tuple ): """simple docstring""" __lowercase =data __lowercase =[0x6745_2301, 0xEFCD_AB89, 0x98BA_DCFE, 0x1032_5476, 0xC3D2_E1F0] @staticmethod def snake_case ( __lowercase : Dict , __lowercase : Optional[Any] ): """simple docstring""" return ((n << b) | (n >> (32 - b))) & 0xFFFF_FFFF def snake_case ( self : Tuple ): """simple docstring""" __lowercase =b'\x80' + b'\x00' * (63 - (len(self.data ) + 8) % 64) __lowercase =self.data + padding + struct.pack('>Q' , 8 * len(self.data ) ) return padded_data def snake_case ( self : Optional[int] ): """simple docstring""" return [ self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 ) ] def snake_case ( self : Tuple , __lowercase : str ): """simple docstring""" __lowercase =list(struct.unpack('>16L' , __lowercase ) ) + [0] * 64 for i in range(16 , 80 ): __lowercase =self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 ) return w def snake_case ( self : List[str] ): """simple docstring""" __lowercase =self.padding() __lowercase =self.split_blocks() for block in self.blocks: __lowercase =self.expand_block(__lowercase ) __lowercase , __lowercase , __lowercase , __lowercase , __lowercase =self.h for i in range(0 , 80 ): if 0 <= i < 20: __lowercase =(b & c) | ((~b) & d) __lowercase =0x5A82_7999 elif 20 <= i < 40: __lowercase =b ^ c ^ d __lowercase =0x6ED9_EBA1 elif 40 <= i < 60: __lowercase =(b & c) | (b & d) | (c & d) __lowercase =0x8F1B_BCDC elif 60 <= i < 80: __lowercase =b ^ c ^ d __lowercase =0xCA62_C1D6 __lowercase , __lowercase , __lowercase , __lowercase , __lowercase =( self.rotate(__lowercase , 5 ) + f + e + k + expanded_block[i] & 0xFFFF_FFFF, a, self.rotate(__lowercase , 30 ), c, d, ) __lowercase =( self.h[0] + a & 0xFFFF_FFFF, self.h[1] + b & 0xFFFF_FFFF, self.h[2] + c & 0xFFFF_FFFF, self.h[3] + d & 0xFFFF_FFFF, self.h[4] + e & 0xFFFF_FFFF, ) return ("{:08x}" * 5).format(*self.h ) def __UpperCamelCase ( ): '''simple docstring''' __lowercase =b'Test String' assert SHAaHash(lowercase__ ).final_hash() == hashlib.shaa(lowercase__ ).hexdigest() # noqa: S324 def __UpperCamelCase ( ): '''simple docstring''' __lowercase =argparse.ArgumentParser(description='Process some strings or files' ) parser.add_argument( '--string', dest='input_string', default='Hello World!! Welcome to Cryptography', help='Hash the string', ) parser.add_argument('--file', dest='input_file', help='Hash contents of a file' ) __lowercase =parser.parse_args() __lowercase =args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file, 'rb' ) as f: __lowercase =f.read() else: __lowercase =bytes(lowercase__, 'utf-8' ) print(SHAaHash(lowercase__ ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
141
'''simple docstring''' # Lint as: python3 import itertools import os import re lowerCamelCase : Any = re.compile(R'([A-Z]+)([A-Z][a-z])') lowerCamelCase : str = re.compile(R'([a-z\d])([A-Z])') lowerCamelCase : Optional[int] = re.compile(R'(?<!_)_(?!_)') lowerCamelCase : List[Any] = re.compile(R'(_{2,})') lowerCamelCase : str = R'^\w+(\.\w+)*$' lowerCamelCase : Dict = R'<>:/\|?*' def _SCREAMING_SNAKE_CASE (A ) -> Any: """simple docstring""" lowercase__ = _uppercase_uppercase_re.sub(R'''\1_\2''' , A ) lowercase__ = _lowercase_uppercase_re.sub(R'''\1_\2''' , A ) return name.lower() def _SCREAMING_SNAKE_CASE (A ) -> Tuple: """simple docstring""" lowercase__ = _single_underscore_re.split(A ) lowercase__ = [_multiple_underscores_re.split(A ) for n in name] return "".join(n.capitalize() for n in itertools.chain.from_iterable(A ) if n != '''''' ) def _SCREAMING_SNAKE_CASE (A ) -> Tuple: """simple docstring""" if os.path.basename(A ) != name: raise ValueError(f"Should be a dataset name, not a path: {name}" ) return camelcase_to_snakecase(A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Optional[Any]: """simple docstring""" if os.path.basename(A ) != name: raise ValueError(f"Should be a dataset name, not a path: {name}" ) if not re.match(_split_re , A ): raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'." ) return f"{filename_prefix_for_name(A )}-{split}" def _SCREAMING_SNAKE_CASE (A , A , A , A=None ) -> List[str]: """simple docstring""" lowercase__ = filename_prefix_for_split(A , A ) if filetype_suffix: prefix += f".{filetype_suffix}" lowercase__ = os.path.join(A , A ) return f"{filepath}*" def _SCREAMING_SNAKE_CASE (A , A , A , A=None , A=None ) -> Optional[Any]: """simple docstring""" lowercase__ = filename_prefix_for_split(A , A ) lowercase__ = os.path.join(A , A ) if shard_lengths: lowercase__ = len(A ) lowercase__ = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(A )] if filetype_suffix: lowercase__ = [filename + f".{filetype_suffix}" for filename in filenames] return filenames else: lowercase__ = prefix if filetype_suffix: filename += f".{filetype_suffix}" return [filename]
2
0
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def lowerCAmelCase__ ( lowerCamelCase_ : Any): '''simple docstring''' if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class lowerCamelCase__ ( nn.Module): '''simple docstring''' def __init__(self ,__lowerCamelCase ,__lowerCamelCase ) -> List[Any]: """simple docstring""" super().__init__() lowerCAmelCase__ : Optional[Any] = module lowerCAmelCase__ : List[Any] = nn.Sequential( nn.Linear(module.in_features ,__lowerCamelCase ,bias=__lowerCamelCase ) ,nn.Linear(__lowerCamelCase ,module.out_features ,bias=__lowerCamelCase ) ,) lowerCAmelCase__ : Union[str, Any] = (2.0 / (5 * min(module.in_features ,module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight ,std=__lowerCamelCase ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def lowerCAmelCase__ (self ,__lowerCamelCase ,*__lowerCamelCase ,**__lowerCamelCase ) -> Tuple: """simple docstring""" return self.module(__lowerCamelCase ,*__lowerCamelCase ,**__lowerCamelCase ) + self.adapter(__lowerCamelCase ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class lowerCamelCase__ ( unittest.TestCase): '''simple docstring''' snake_case_ ="""bigscience/bloom-1b7""" # Constant values snake_case_ =2.1_09_65_95_52_69_25_74 snake_case_ ="""Hello my name is""" snake_case_ =set() EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""") EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""") EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""") snake_case_ =10 def lowerCAmelCase__ (self ) -> Union[str, Any]: """simple docstring""" lowerCAmelCase__ : int = AutoTokenizer.from_pretrained(self.model_name ) class lowerCamelCase__ ( lowercase_): '''simple docstring''' def lowerCAmelCase__ (self ) -> Dict: """simple docstring""" super().setUp() # Models and tokenizer lowerCAmelCase__ : List[Any] = AutoModelForCausalLM.from_pretrained( self.model_name ,torch_dtype=torch.floataa ,device_map='''auto''' ) lowerCAmelCase__ : int = AutoModelForCausalLM.from_pretrained(self.model_name ,load_in_abit=__lowerCamelCase ,device_map='''auto''' ) def lowerCAmelCase__ (self ) -> str: """simple docstring""" del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ (self ) -> Optional[Any]: """simple docstring""" lowerCAmelCase__ : Dict = self.model_abit.config self.assertTrue(hasattr(__lowerCamelCase ,'''quantization_config''' ) ) lowerCAmelCase__ : Union[str, Any] = config.to_dict() lowerCAmelCase__ : Optional[Any] = config.to_diff_dict() lowerCAmelCase__ : Any = config.to_json_string() def lowerCAmelCase__ (self ) -> List[str]: """simple docstring""" from bitsandbytes.nn import Paramsabit lowerCAmelCase__ : Any = self.model_fpaa.get_memory_footprint() lowerCAmelCase__ : Tuple = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit ,self.EXPECTED_RELATIVE_DIFFERENCE ) lowerCAmelCase__ : List[Any] = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def lowerCAmelCase__ (self ) -> Optional[Any]: """simple docstring""" from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(__lowerCamelCase ,torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def lowerCAmelCase__ (self ) -> List[str]: """simple docstring""" lowerCAmelCase__ : int = self.tokenizer(self.input_text ,return_tensors='''pt''' ) lowerCAmelCase__ : Tuple = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) ,max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] ,skip_special_tokens=__lowerCamelCase ) ,self.EXPECTED_OUTPUTS ) def lowerCAmelCase__ (self ) -> str: """simple docstring""" lowerCAmelCase__ : List[str] = BitsAndBytesConfig() lowerCAmelCase__ : Tuple = True lowerCAmelCase__ : int = AutoModelForCausalLM.from_pretrained( self.model_name ,quantization_config=__lowerCamelCase ,device_map='''auto''' ) lowerCAmelCase__ : int = self.tokenizer(self.input_text ,return_tensors='''pt''' ) lowerCAmelCase__ : Optional[int] = model_abit_from_config.generate( input_ids=encoded_input['''input_ids'''].to(0 ) ,max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] ,skip_special_tokens=__lowerCamelCase ) ,self.EXPECTED_OUTPUTS ) def lowerCAmelCase__ (self ) -> str: """simple docstring""" with self.assertRaises(__lowerCamelCase ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(__lowerCamelCase ) def lowerCAmelCase__ (self ) -> Tuple: """simple docstring""" lowerCAmelCase__ : str = BitsAndBytesConfig() with self.assertRaises(__lowerCamelCase ): lowerCAmelCase__ : Optional[int] = AutoModelForCausalLM.from_pretrained( self.model_name ,quantization_config=__lowerCamelCase ,load_in_abit=__lowerCamelCase ,device_map='''auto''' ,bnb_abit_quant_type='''nf4''' ,) def lowerCAmelCase__ (self ) -> Union[str, Any]: """simple docstring""" with self.assertRaises(__lowerCamelCase ): # Tries with `str` self.model_abit.to('''cpu''' ) with self.assertRaises(__lowerCamelCase ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(__lowerCamelCase ): # Tries with a `device` self.model_abit.to(torch.device('''cuda:0''' ) ) with self.assertRaises(__lowerCamelCase ): # Tries with a `device` self.model_abit.float() with self.assertRaises(__lowerCamelCase ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything lowerCAmelCase__ : Optional[Any] = self.tokenizer(self.input_text ,return_tensors='''pt''' ) lowerCAmelCase__ : Any = self.model_fpaa.to(torch.floataa ) lowerCAmelCase__ : Optional[Any] = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) ,max_new_tokens=10 ) # Check this does not throw an error lowerCAmelCase__ : List[Any] = self.model_fpaa.to('''cpu''' ) # Check this does not throw an error lowerCAmelCase__ : Any = self.model_fpaa.half() # Check this does not throw an error lowerCAmelCase__ : Optional[Any] = self.model_fpaa.float() def lowerCAmelCase__ (self ) -> Dict: """simple docstring""" lowerCAmelCase__ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' ,load_in_abit=__lowerCamelCase ,device_map='''auto''' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class lowerCamelCase__ ( unittest.TestCase): '''simple docstring''' @classmethod def lowerCAmelCase__ (cls ) -> List[str]: """simple docstring""" lowerCAmelCase__ : str = '''t5-small''' lowerCAmelCase__ : Dict = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense lowerCAmelCase__ : Dict = AutoTokenizer.from_pretrained(cls.model_name ) lowerCAmelCase__ : List[Any] = '''Translate in German: Hello, my dog is cute''' def lowerCAmelCase__ (self ) -> Any: """simple docstring""" gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ (self ) -> Any: """simple docstring""" from transformers import TaForConditionalGeneration lowerCAmelCase__ : Dict = TaForConditionalGeneration._keep_in_fpaa_modules lowerCAmelCase__ : Optional[int] = None # test with `t5-small` lowerCAmelCase__ : Optional[int] = TaForConditionalGeneration.from_pretrained(self.model_name ,load_in_abit=__lowerCamelCase ,device_map='''auto''' ) lowerCAmelCase__ : Optional[int] = self.tokenizer(self.input_text ,return_tensors='''pt''' ).to(0 ) lowerCAmelCase__ : Optional[Any] = model.generate(**__lowerCamelCase ) # test with `flan-t5-small` lowerCAmelCase__ : Tuple = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name ,load_in_abit=__lowerCamelCase ,device_map='''auto''' ) lowerCAmelCase__ : List[str] = self.tokenizer(self.input_text ,return_tensors='''pt''' ).to(0 ) lowerCAmelCase__ : Tuple = model.generate(**__lowerCamelCase ) lowerCAmelCase__ : Tuple = modules def lowerCAmelCase__ (self ) -> int: """simple docstring""" import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` lowerCAmelCase__ : Optional[int] = TaForConditionalGeneration.from_pretrained(self.model_name ,load_in_abit=__lowerCamelCase ,device_map='''auto''' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q ,bnb.nn.Linearabit ) ) lowerCAmelCase__ : Dict = self.tokenizer(self.input_text ,return_tensors='''pt''' ).to(0 ) lowerCAmelCase__ : int = model.generate(**__lowerCamelCase ) # test with `flan-t5-small` lowerCAmelCase__ : Dict = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name ,load_in_abit=__lowerCamelCase ,device_map='''auto''' ) lowerCAmelCase__ : int = self.tokenizer(self.input_text ,return_tensors='''pt''' ).to(0 ) lowerCAmelCase__ : Dict = model.generate(**__lowerCamelCase ) class lowerCamelCase__ ( lowercase_): '''simple docstring''' def lowerCAmelCase__ (self ) -> int: """simple docstring""" super().setUp() # model_name lowerCAmelCase__ : str = '''bigscience/bloom-560m''' lowerCAmelCase__ : Optional[Any] = '''t5-small''' # Different types of model lowerCAmelCase__ : Union[str, Any] = AutoModel.from_pretrained(self.model_name ,load_in_abit=__lowerCamelCase ,device_map='''auto''' ) # Sequence classification model lowerCAmelCase__ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained( self.model_name ,load_in_abit=__lowerCamelCase ,device_map='''auto''' ) # CausalLM model lowerCAmelCase__ : Optional[Any] = AutoModelForCausalLM.from_pretrained(self.model_name ,load_in_abit=__lowerCamelCase ,device_map='''auto''' ) # Seq2seq model lowerCAmelCase__ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name ,load_in_abit=__lowerCamelCase ,device_map='''auto''' ) def lowerCAmelCase__ (self ) -> str: """simple docstring""" del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ (self ) -> int: """simple docstring""" from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class lowerCamelCase__ ( lowercase_): '''simple docstring''' def lowerCAmelCase__ (self ) -> List[str]: """simple docstring""" super().setUp() def lowerCAmelCase__ (self ) -> List[str]: """simple docstring""" del self.pipe gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ (self ) -> str: """simple docstring""" lowerCAmelCase__ : Union[str, Any] = pipeline( '''text-generation''' ,model=self.model_name ,model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} ,max_new_tokens=self.MAX_NEW_TOKENS ,) # Real second forward pass lowerCAmelCase__ : Any = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['''generated_text'''] ,self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class lowerCamelCase__ ( lowercase_): '''simple docstring''' def lowerCAmelCase__ (self ) -> str: """simple docstring""" super().setUp() def lowerCAmelCase__ (self ) -> Union[str, Any]: """simple docstring""" lowerCAmelCase__ : Optional[Any] = AutoModelForCausalLM.from_pretrained( self.model_name ,load_in_abit=__lowerCamelCase ,device_map='''balanced''' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) ,{0, 1} ) # Check that inference pass works on the model lowerCAmelCase__ : Union[str, Any] = self.tokenizer(self.input_text ,return_tensors='''pt''' ) # Second real batch lowerCAmelCase__ : Union[str, Any] = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) ,max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] ,skip_special_tokens=__lowerCamelCase ) ,self.EXPECTED_OUTPUTS ) class lowerCamelCase__ ( lowercase_): '''simple docstring''' def lowerCAmelCase__ (self ) -> str: """simple docstring""" lowerCAmelCase__ : Dict = '''facebook/opt-350m''' super().setUp() def lowerCAmelCase__ (self ) -> Tuple: """simple docstring""" if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ): return # Step 1: freeze all parameters lowerCAmelCase__ : str = AutoModelForCausalLM.from_pretrained(self.model_name ,load_in_abit=__lowerCamelCase ) self.assertEqual(set(model.hf_device_map.values() ) ,{torch.cuda.current_device()} ) for param in model.parameters(): lowerCAmelCase__ : Union[str, Any] = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability lowerCAmelCase__ : int = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(__lowerCamelCase ) ): lowerCAmelCase__ : str = LoRALayer(module.q_proj ,rank=16 ) lowerCAmelCase__ : Dict = LoRALayer(module.k_proj ,rank=16 ) lowerCAmelCase__ : Optional[Any] = LoRALayer(module.v_proj ,rank=16 ) # Step 3: dummy batch lowerCAmelCase__ : Dict = self.tokenizer('''Test batch ''' ,return_tensors='''pt''' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): lowerCAmelCase__ : Optional[Any] = model.forward(**__lowerCamelCase ) out.logits.norm().backward() for module in model.modules(): if isinstance(__lowerCamelCase ,__lowerCamelCase ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(__lowerCamelCase ,nn.Embedding ): self.assertTrue(module.weight.grad is None ) class lowerCamelCase__ ( lowercase_): '''simple docstring''' snake_case_ ="""gpt2-xl""" snake_case_ =3.31_91_85_48_54_15_21_87
129
'''simple docstring''' import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class __lowerCAmelCase : '''simple docstring''' def __init__(self : str , UpperCamelCase : Tuple , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Optional[int]=13 , UpperCamelCase : Tuple=16 , UpperCamelCase : Union[str, Any]=7 , UpperCamelCase : List[Any]=True , UpperCamelCase : List[str]=True , UpperCamelCase : str=True , UpperCamelCase : Tuple=False , UpperCamelCase : str=True , UpperCamelCase : Tuple=2 , UpperCamelCase : Optional[int]=32 , UpperCamelCase : Any=4 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Tuple=30 , UpperCamelCase : str=0 , UpperCamelCase : Tuple=1 , UpperCamelCase : List[Any]=2 , UpperCamelCase : str=None , ): '''simple docstring''' lowercase__ = parent lowercase__ = batch_size lowercase__ = decoder_seq_length # For common tests lowercase__ = self.decoder_seq_length lowercase__ = is_training lowercase__ = use_attention_mask lowercase__ = use_labels lowercase__ = vocab_size lowercase__ = d_model lowercase__ = d_model lowercase__ = decoder_layers lowercase__ = decoder_layers lowercase__ = decoder_ffn_dim lowercase__ = decoder_attention_heads lowercase__ = decoder_attention_heads lowercase__ = eos_token_id lowercase__ = bos_token_id lowercase__ = pad_token_id lowercase__ = decoder_start_token_id lowercase__ = use_cache lowercase__ = max_position_embeddings lowercase__ = None lowercase__ = decoder_seq_length lowercase__ = 2 lowercase__ = 1 def UpperCamelCase__ (self : str ): '''simple docstring''' lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) lowercase__ = None if self.use_attention_mask: lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) lowercase__ = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def UpperCamelCase__ (self : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple , UpperCamelCase : List[str] , ): '''simple docstring''' lowercase__ = True lowercase__ = TrOCRDecoder(config=UpperCamelCase ).to(UpperCamelCase ).eval() lowercase__ = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass lowercase__ = model(UpperCamelCase , use_cache=UpperCamelCase ) lowercase__ = model(UpperCamelCase ) lowercase__ = model(UpperCamelCase , use_cache=UpperCamelCase ) self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) ) self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) + 1 ) lowercase__ = outputs['''past_key_values'''] # create hypothetical next token and extent to next_input_ids lowercase__ = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # append to next input_ids and lowercase__ = torch.cat([input_ids, next_tokens] , dim=-1 ) lowercase__ = model(UpperCamelCase )['''last_hidden_state'''] lowercase__ = model(UpperCamelCase , past_key_values=UpperCamelCase )['''last_hidden_state'''] # select random slice lowercase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowercase__ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() lowercase__ = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(UpperCamelCase , UpperCamelCase , atol=1E-3 ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = self.prepare_config_and_inputs() lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = config_and_inputs lowercase__ = {'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_torch class __lowerCAmelCase (lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : List[str] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () lowerCAmelCase__ : List[Any] = (TrOCRForCausalLM,) if is_torch_available() else () lowerCAmelCase__ : Optional[Any] = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {} lowerCAmelCase__ : Optional[Any] = True lowerCAmelCase__ : List[str] = False def UpperCamelCase__ (self : Any ): '''simple docstring''' lowercase__ = TrOCRStandaloneDecoderModelTester(self , is_training=UpperCamelCase ) lowercase__ = ConfigTester(self , config_class=UpperCamelCase ) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' pass def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' pass def UpperCamelCase__ (self : Any ): '''simple docstring''' pass def UpperCamelCase__ (self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*UpperCamelCase ) def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' return @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' pass
2
0
"""simple docstring""" def lowercase ( _snake_case : List[Any] , _snake_case : Optional[int] ) ->Tuple: """simple docstring""" __snake_case : str = (boundary[1] - boundary[0]) / steps __snake_case : Tuple = boundary[0] __snake_case : int = boundary[1] __snake_case : Union[str, Any] = make_points(_snake_case , _snake_case , _snake_case ) __snake_case : Any = 0.0 y += (h / 2.0) * f(_snake_case ) for i in x_i: # print(i) y += h * f(_snake_case ) y += (h / 2.0) * f(_snake_case ) return y def lowercase ( _snake_case : Optional[Any] , _snake_case : Dict , _snake_case : List[Any] ) ->int: """simple docstring""" __snake_case : Dict = a + h while x < (b - h): yield x __snake_case : List[Any] = x + h def lowercase ( _snake_case : Optional[Any] ) ->Any: # enter your function here """simple docstring""" __snake_case : str = (x - 0) * (x - 0) return y def lowercase ( ) ->List[str]: """simple docstring""" __snake_case : str = 0.0 # Lower bound of integration __snake_case : Union[str, Any] = 1.0 # Upper bound of integration __snake_case : str = 10.0 # define number of steps or resolution __snake_case : Dict = [a, b] # define boundary of integration __snake_case : Union[str, Any] = method_a(_snake_case , _snake_case ) print(f"""y = {y}""" ) if __name__ == "__main__": main()
102
'''simple docstring''' def _SCREAMING_SNAKE_CASE (A ) -> int: """simple docstring""" if not isinstance(A , A ): raise TypeError('''only integers accepted as input''' ) else: lowercase__ = str(abs(A ) ) lowercase__ = [list(A ) for char in range(len(A ) )] for index in range(len(A ) ): num_transpositions[index].pop(A ) return max( int(''''''.join(list(A ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__('doctest').testmod()
2
0
from typing import TYPE_CHECKING from ...utils import _LazyModule A_ :Tuple = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys A_ :List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
71
'''simple docstring''' import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants lowerCamelCase : str = Mapping[str, np.ndarray] lowerCamelCase : List[Any] = Mapping[str, Any] # Is a nested dict. lowerCamelCase : Any = 0.0_1 @dataclasses.dataclass(frozen=lowercase_ ) class __lowerCAmelCase : '''simple docstring''' lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. lowerCAmelCase__ : np.ndarray # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. lowerCAmelCase__ : np.ndarray # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type] # Chain indices for multi-chain predictions lowerCAmelCase__ : Optional[np.ndarray] = None # Optional remark about the protein. Included as a comment in output PDB # files lowerCAmelCase__ : Optional[str] = None # Templates used to generate this protein (prediction-only) lowerCAmelCase__ : Optional[Sequence[str]] = None # Chain corresponding to each parent lowerCAmelCase__ : Optional[Sequence[int]] = None def _SCREAMING_SNAKE_CASE (A ) -> Protein: """simple docstring""" lowercase__ = R'''(\[[A-Z]+\]\n)''' lowercase__ = [tag.strip() for tag in re.split(A , A ) if len(A ) > 0] lowercase__ = zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] ) lowercase__ = ["N", "CA", "C"] lowercase__ = None lowercase__ = None lowercase__ = None for g in groups: if "[PRIMARY]" == g[0]: lowercase__ = g[1][0].strip() for i in range(len(A ) ): if seq[i] not in residue_constants.restypes: lowercase__ = '''X''' # FIXME: strings are immutable lowercase__ = np.array( [residue_constants.restype_order.get(A , residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: lowercase__ = [] for axis in range(3 ): tertiary.append(list(map(A , g[1][axis].split() ) ) ) lowercase__ = np.array(A ) lowercase__ = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(A ): lowercase__ = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: lowercase__ = np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) ) lowercase__ = np.zeros( ( len(A ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(A ): lowercase__ = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=A , atom_mask=A , aatype=A , residue_index=np.arange(len(A ) ) , b_factors=A , ) def _SCREAMING_SNAKE_CASE (A , A = 0 ) -> List[str]: """simple docstring""" lowercase__ = [] lowercase__ = prot.remark if remark is not None: pdb_headers.append(f"REMARK {remark}" ) lowercase__ = prot.parents lowercase__ = prot.parents_chain_index if parents is not None and parents_chain_index is not None: lowercase__ = [p for i, p in zip(A , A ) if i == chain_id] if parents is None or len(A ) == 0: lowercase__ = ['''N/A'''] pdb_headers.append(f"PARENT {' '.join(A )}" ) return pdb_headers def _SCREAMING_SNAKE_CASE (A , A ) -> str: """simple docstring""" lowercase__ = [] lowercase__ = pdb_str.split('''\n''' ) lowercase__ = prot.remark if remark is not None: out_pdb_lines.append(f"REMARK {remark}" ) lowercase__ = 42 if prot.parents is not None and len(prot.parents ) > 0: lowercase__ = [] if prot.parents_chain_index is not None: lowercase__ = {} for p, i in zip(prot.parents , prot.parents_chain_index ): parent_dict.setdefault(str(A ) , [] ) parent_dict[str(A )].append(A ) lowercase__ = max([int(A ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): lowercase__ = parent_dict.get(str(A ) , ['''N/A'''] ) parents_per_chain.append(A ) else: parents_per_chain.append(list(prot.parents ) ) else: lowercase__ = [['''N/A''']] def make_parent_line(A ) -> str: return f"PARENT {' '.join(A )}" out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) lowercase__ = 0 for i, l in enumerate(A ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(A ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(A ): lowercase__ = parents_per_chain[chain_counter] else: lowercase__ = ['''N/A'''] out_pdb_lines.append(make_parent_line(A ) ) return "\n".join(A ) def _SCREAMING_SNAKE_CASE (A ) -> str: """simple docstring""" lowercase__ = residue_constants.restypes + ['''X'''] def res_atoa(A ) -> str: return residue_constants.restype_atoa.get(restypes[r] , '''UNK''' ) lowercase__ = residue_constants.atom_types lowercase__ = [] lowercase__ = prot.atom_mask lowercase__ = prot.aatype lowercase__ = prot.atom_positions lowercase__ = prot.residue_index.astype(np.intaa ) lowercase__ = prot.b_factors lowercase__ = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError('''Invalid aatypes.''' ) lowercase__ = get_pdb_headers(A ) if len(A ) > 0: pdb_lines.extend(A ) lowercase__ = aatype.shape[0] lowercase__ = 1 lowercase__ = 0 lowercase__ = string.ascii_uppercase lowercase__ = None # Add all atom sites. for i in range(A ): lowercase__ = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(A , atom_positions[i] , atom_mask[i] , b_factors[i] ): if mask < 0.5: continue lowercase__ = '''ATOM''' lowercase__ = atom_name if len(A ) == 4 else f" {atom_name}" lowercase__ = '''''' lowercase__ = '''''' lowercase__ = 1.00 lowercase__ = atom_name[0] # Protein supports only C, N, O, S, this works. lowercase__ = '''''' lowercase__ = '''A''' if chain_index is not None: lowercase__ = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! lowercase__ = ( f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}" f"{res_name_a:>3} {chain_tag:>1}" f"{residue_index[i]:>4}{insertion_code:>1} " f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}" f"{occupancy:>6.2f}{b_factor:>6.2f} " f"{element:>2}{charge:>2}" ) pdb_lines.append(A ) atom_index += 1 lowercase__ = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: lowercase__ = True lowercase__ = chain_index[i + 1] if should_terminate: # Close the chain. lowercase__ = '''TER''' lowercase__ = ( f"{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}" ) pdb_lines.append(A ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. pdb_lines.extend(get_pdb_headers(A , A ) ) pdb_lines.append('''END''' ) pdb_lines.append('''''' ) return "\n".join(A ) def _SCREAMING_SNAKE_CASE (A ) -> np.ndarray: """simple docstring""" return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def _SCREAMING_SNAKE_CASE (A , A , A = None , A = None , A = None , A = None , A = None , ) -> Protein: """simple docstring""" return Protein( aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=A , remark=A , parents=A , parents_chain_index=A , )
2
0
'''simple docstring''' import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _snake_case ( lowercase_ , unittest.TestCase ): lowerCAmelCase_ : str = LayoutLMTokenizer lowerCAmelCase_ : Optional[int] = LayoutLMTokenizerFast lowerCAmelCase_ : Optional[int] = True lowerCAmelCase_ : int = True def lowerCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' super().setUp() snake_case_ = [ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def lowerCAmelCase__ ( self , **a__ ) -> List[str]: '''simple docstring''' return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **a__ ) def lowerCAmelCase__ ( self , a__ ) -> Tuple: '''simple docstring''' snake_case_ = "UNwant\u00E9d,running" snake_case_ = "unwanted, running" return input_text, output_text def lowerCAmelCase__ ( self ) -> Dict: '''simple docstring''' snake_case_ = self.tokenizer_class(self.vocab_file ) snake_case_ = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(a__ , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [7, 4, 5, 10, 8, 9] ) def lowerCAmelCase__ ( self ) -> Tuple: '''simple docstring''' pass
85
'''simple docstring''' from __future__ import annotations def _SCREAMING_SNAKE_CASE (A , A ) -> list[list[int]]: """simple docstring""" lowercase__ = [] create_all_state(1 , A , A , [] , A ) return result def _SCREAMING_SNAKE_CASE (A , A , A , A , A , ) -> None: """simple docstring""" if level == 0: total_list.append(current_list[:] ) return for i in range(A , total_number - level + 2 ): current_list.append(A ) create_all_state(i + 1 , A , level - 1 , A , A ) current_list.pop() def _SCREAMING_SNAKE_CASE (A ) -> None: """simple docstring""" for i in total_list: print(*A ) if __name__ == "__main__": lowerCamelCase : Tuple = 4 lowerCamelCase : Union[str, Any] = 2 lowerCamelCase : Dict = generate_all_combinations(n, k) print_all_state(total_list)
2
0
import numpy as np def a__ ( _UpperCamelCase : str ,_UpperCamelCase : List[Any] ): return np.where(vector > 0 ,_UpperCamelCase ,(alpha * (np.exp(_UpperCamelCase ) - 1)) ) if __name__ == "__main__": import doctest doctest.testmod()
330
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand lowerCamelCase : Optional[Any] = ( '4S 3H 2C 7S 5H', '9D 8H 2C 6S 7H', '2D 6D 9D TH 7D', 'TC 8C 2S JH 6C', 'JH 8S TH AH QH', 'TS KS 5S 9S AC', 'KD 6S 9D TH AD', 'KS 8D 4D 9S 4S', # pair '8C 4S KH JS 4D', # pair 'QH 8H KD JH 8S', # pair 'KC 4H KS 2H 8D', # pair 'KD 4S KC 3H 8S', # pair 'AH 8S AS KC JH', # pair '3H 4C 4H 3S 2H', # 2 pairs '5S 5D 2C KH KH', # 2 pairs '3C KH 5D 5S KH', # 2 pairs 'AS 3C KH AD KH', # 2 pairs '7C 7S 3S 7H 5S', # 3 of a kind '7C 7S KH 2H 7H', # 3 of a kind 'AC KH QH AH AS', # 3 of a kind '2H 4D 3C AS 5S', # straight (low ace) '3C 5C 4C 2C 6H', # straight '6S 8S 7S 5H 9H', # straight 'JS QS 9H TS KH', # straight 'QC KH TS JS AH', # straight (high ace) '8C 9C 5C 3C TC', # flush '3S 8S 9S 5S KS', # flush '4C 5C 9C 8C KC', # flush 'JH 8H AH KH QH', # flush '3D 2H 3H 2C 2D', # full house '2H 2C 3S 3H 3D', # full house 'KH KC 3S 3H 3D', # full house 'JC 6H JS JD JH', # 4 of a kind 'JC 7H JS JD JH', # 4 of a kind 'JC KH JS JD JH', # 4 of a kind '2S AS 4S 5S 3S', # straight flush (low ace) '2D 6D 3D 4D 5D', # straight flush '5C 6C 3C 7C 4C', # straight flush 'JH 9H TH KH QH', # straight flush 'JH AH TH KH QH', # royal flush (high ace straight flush) ) lowerCamelCase : Tuple = ( ('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'), ('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'), ('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'), ('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'), ('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'), ('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'), ('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'), ('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'), ('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'), ('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'), ('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'), ('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'), ('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'), ('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'), ('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'), ('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'), ('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'), ('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'), ('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'), ('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'), ('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'), ('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'), ('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'), ('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'), ('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'), ('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'), ('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'), ('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'), ('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'), ('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'), ('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'), ) lowerCamelCase : Dict = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', True), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', False), ('AS 3S 4S 8S 2S', True), ) lowerCamelCase : Any = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', False), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', True), ) lowerCamelCase : Tuple = ( ('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]), ('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]), ('JH QD KC AS TS', False, [14, 13, 12, 11, 10]), ('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]), ) lowerCamelCase : Optional[int] = ( ('JH AH TH KH QH', 0), ('JH 9H TH KH QH', 0), ('JC KH JS JD JH', 7), ('KH KC 3S 3H 3D', 6), ('8C 9C 5C 3C TC', 0), ('JS QS 9H TS KH', 0), ('7C 7S KH 2H 7H', 3), ('3C KH 5D 5S KH', 2), ('QH 8H KD JH 8S', 1), ('2D 6D 9D TH 7D', 0), ) lowerCamelCase : Dict = ( ('JH AH TH KH QH', 23), ('JH 9H TH KH QH', 22), ('JC KH JS JD JH', 21), ('KH KC 3S 3H 3D', 20), ('8C 9C 5C 3C TC', 19), ('JS QS 9H TS KH', 18), ('7C 7S KH 2H 7H', 17), ('3C KH 5D 5S KH', 16), ('QH 8H KD JH 8S', 15), ('2D 6D 9D TH 7D', 14), ) def _SCREAMING_SNAKE_CASE () -> Union[str, Any]: """simple docstring""" lowercase__ ,lowercase__ = randrange(len(A ) ), randrange(len(A ) ) lowercase__ = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)] lowercase__ ,lowercase__ = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def _SCREAMING_SNAKE_CASE (A = 100 ) -> str: """simple docstring""" return (generate_random_hand() for _ in range(A )) @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> List[str]: """simple docstring""" assert PokerHand(A )._is_flush() == expected @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Union[str, Any]: """simple docstring""" assert PokerHand(A )._is_straight() == expected @pytest.mark.parametrize('''hand, expected, card_values''' , A ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Any: """simple docstring""" lowercase__ = PokerHand(A ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Tuple: """simple docstring""" assert PokerHand(A )._is_same_kind() == expected @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Optional[Any]: """simple docstring""" assert PokerHand(A )._hand_type == expected @pytest.mark.parametrize('''hand, other, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Union[str, Any]: """simple docstring""" assert PokerHand(A ).compare_with(PokerHand(A ) ) == expected @pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Optional[Any]: """simple docstring""" assert PokerHand(A ).compare_with(PokerHand(A ) ) == expected def _SCREAMING_SNAKE_CASE () -> Tuple: """simple docstring""" lowercase__ = [PokerHand(A ) for hand in SORTED_HANDS] lowercase__ = poker_hands.copy() shuffle(A ) lowercase__ = chain(sorted(A ) ) for index, hand in enumerate(A ): assert hand == poker_hands[index] def _SCREAMING_SNAKE_CASE () -> List[Any]: """simple docstring""" lowercase__ = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )] pokerhands.sort(reverse=A ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def _SCREAMING_SNAKE_CASE () -> int: """simple docstring""" lowercase__ = PokerHand('''2C 4S AS 3D 5C''' ) lowercase__ = True lowercase__ = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def _SCREAMING_SNAKE_CASE () -> Union[str, Any]: """simple docstring""" lowercase__ = 0 lowercase__ = os.path.abspath(os.path.dirname(A ) ) lowercase__ = os.path.join(A , '''poker_hands.txt''' ) with open(A ) as file_hand: for line in file_hand: lowercase__ = line[:14].strip() lowercase__ = line[15:].strip() lowercase__ ,lowercase__ = PokerHand(A ), PokerHand(A ) lowercase__ = player.compare_with(A ) if output == "Win": answer += 1 assert answer == 376
2
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __A = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['DeiTFeatureExtractor'] __A = ['DeiTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DeiTForImageClassification', 'DeiTForImageClassificationWithTeacher', 'DeiTForMaskedImageModeling', 'DeiTModel', 'DeiTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDeiTForImageClassification', 'TFDeiTForImageClassificationWithTeacher', 'TFDeiTForMaskedImageModeling', 'TFDeiTModel', 'TFDeiTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
90
'''simple docstring''' import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": lowerCamelCase : List[str] = argparse.ArgumentParser( description=( 'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='bert', choices=['bert']) parser.add_argument('--model_name', default='bert-base-uncased', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') lowerCamelCase : str = parser.parse_args() if args.model_type == "bert": lowerCamelCase : List[Any] = BertForMaskedLM.from_pretrained(args.model_name) lowerCamelCase : Any = 'bert' else: raise ValueError('args.model_type should be "bert".') lowerCamelCase : int = model.state_dict() lowerCamelCase : int = {} for w in ["word_embeddings", "position_embeddings"]: lowerCamelCase : Any = state_dict[f"""{prefix}.embeddings.{w}.weight"""] for w in ["weight", "bias"]: lowerCamelCase : Any = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""] lowerCamelCase : Tuple = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: lowerCamelCase : Dict = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}""" ] lowerCamelCase : Dict = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}""" ] lowerCamelCase : List[Any] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}""" ] lowerCamelCase : Tuple = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}""" ] lowerCamelCase : Optional[int] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}""" ] lowerCamelCase : Optional[Any] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}""" ] lowerCamelCase : Dict = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}""" ] lowerCamelCase : Any = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}""" ] std_idx += 1 lowerCamelCase : Optional[int] = state_dict['cls.predictions.decoder.weight'] lowerCamelCase : str = state_dict['cls.predictions.bias'] if args.vocab_transform: for w in ["weight", "bias"]: lowerCamelCase : str = state_dict[f"""cls.predictions.transform.dense.{w}"""] lowerCamelCase : Any = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""] print(f"""N layers selected for distillation: {std_idx}""") print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""") print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""") torch.save(compressed_sd, args.dump_checkpoint)
2
0
import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = R'\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n' class __lowerCAmelCase ( lowercase_ ): @add_start_docstrings(__magic_name__ ) def __call__(self , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Dict: '''simple docstring''' raise NotImplementedError('''StoppingCriteria needs to be subclassed''' ) class __lowerCAmelCase ( lowercase_ ): def __init__(self , __magic_name__ , __magic_name__ = None ) -> Tuple: '''simple docstring''' snake_case_ : Dict = max_length snake_case_ : List[Any] = max_position_embeddings @add_start_docstrings(__magic_name__ ) def __call__(self , __magic_name__ , __magic_name__ , **__magic_name__ ) -> int: '''simple docstring''' snake_case_ : Any = input_ids.shape[-1] snake_case_ : Tuple = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( '''This is a friendly reminder - the current text generation call will exceed the model\'s predefined ''' F'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe ''' '''exceptions, performance degradation, or nothing at all.''' ) return is_done class __lowerCAmelCase ( lowercase_ ): def __init__(self , __magic_name__ , __magic_name__ ) -> str: '''simple docstring''' warnings.warn( '''The class `MaxNewTokensCriteria` is deprecated. ''' F'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` ''' '''with `max_length = start_length + max_new_tokens` instead.''' , __magic_name__ , ) snake_case_ : Optional[int] = start_length snake_case_ : Tuple = max_new_tokens snake_case_ : Any = start_length + max_new_tokens @add_start_docstrings(__magic_name__ ) def __call__(self , __magic_name__ , __magic_name__ , **__magic_name__ ) -> List[Any]: '''simple docstring''' return input_ids.shape[-1] >= self.max_length class __lowerCAmelCase ( lowercase_ ): def __init__(self , __magic_name__ , __magic_name__ = None ) -> Optional[Any]: '''simple docstring''' snake_case_ : int = max_time snake_case_ : Union[str, Any] = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(__magic_name__ ) def __call__(self , __magic_name__ , __magic_name__ , **__magic_name__ ) -> List[str]: '''simple docstring''' return time.time() - self.initial_timestamp > self.max_time class __lowerCAmelCase ( lowercase_ ): @add_start_docstrings(__magic_name__ ) def __call__(self , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Dict: '''simple docstring''' return any(criteria(__magic_name__ , __magic_name__ ) for criteria in self ) @property def lowerCamelCase (self ) -> int: '''simple docstring''' for stopping_criterium in self: if isinstance(__magic_name__ , __magic_name__ ): return stopping_criterium.max_length elif isinstance(__magic_name__ , __magic_name__ ): return stopping_criterium.max_length return None def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> StoppingCriteriaList: """simple docstring""" snake_case_ : Optional[int] = stopping_criteria.max_length snake_case_ : Dict = deepcopy(_UpperCamelCase ) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn('''You set different `max_length` for stopping criteria and `max_length` parameter''' , _UpperCamelCase ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=_UpperCamelCase ) ) return new_stopping_criteria
279
'''simple docstring''' from ....utils import logging lowerCamelCase : Optional[Any] = logging.get_logger(__name__) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any]=None , UpperCamelCase : int=2048 ): '''simple docstring''' lowercase__ = config.__dict__ lowercase__ = modal_hidden_size if num_labels: lowercase__ = num_labels
2
0
'''simple docstring''' import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class _lowercase : def __init__( self: Union[str, Any] , UpperCamelCase__: Tuple , UpperCamelCase__: List[str]=13 , UpperCamelCase__: Optional[Any]=7 , UpperCamelCase__: Optional[int]=True , UpperCamelCase__: List[str]=True , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: str=True , UpperCamelCase__: Dict=99 , UpperCamelCase__: Optional[Any]=32 , UpperCamelCase__: Dict=5 , UpperCamelCase__: List[Any]=4 , UpperCamelCase__: Optional[int]=37 , UpperCamelCase__: str="gelu" , UpperCamelCase__: List[Any]=0.1 , UpperCamelCase__: Optional[Any]=0.1 , UpperCamelCase__: Optional[Any]=512 , UpperCamelCase__: Optional[Any]=16 , UpperCamelCase__: str=2 , UpperCamelCase__: Any=0.02 , UpperCamelCase__: Tuple=3 , UpperCamelCase__: Optional[Any]=4 , UpperCamelCase__: Union[str, Any]=None , ): lowerCamelCase__ : Optional[Any] = parent lowerCamelCase__ : str = batch_size lowerCamelCase__ : Optional[int] = seq_length lowerCamelCase__ : Any = is_training lowerCamelCase__ : str = use_input_mask lowerCamelCase__ : Tuple = use_token_type_ids lowerCamelCase__ : Any = use_labels lowerCamelCase__ : int = vocab_size lowerCamelCase__ : Dict = hidden_size lowerCamelCase__ : str = num_hidden_layers lowerCamelCase__ : List[Any] = num_attention_heads lowerCamelCase__ : List[Any] = intermediate_size lowerCamelCase__ : List[str] = hidden_act lowerCamelCase__ : List[Any] = hidden_dropout_prob lowerCamelCase__ : Optional[Any] = attention_probs_dropout_prob lowerCamelCase__ : List[Any] = max_position_embeddings lowerCamelCase__ : str = type_vocab_size lowerCamelCase__ : List[Any] = type_sequence_label_size lowerCamelCase__ : List[Any] = initializer_range lowerCamelCase__ : List[str] = num_labels lowerCamelCase__ : List[str] = num_choices lowerCamelCase__ : Tuple = scope def lowerCamelCase_ ( self: str ): lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ : List[Any] = None if self.use_input_mask: lowerCamelCase__ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ : str = None if self.use_token_type_ids: lowerCamelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ : str = None lowerCamelCase__ : Optional[Any] = None lowerCamelCase__ : Any = None if self.use_labels: lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ : Any = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ : str = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase_ ( self: str ): return NystromformerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: List[str] , UpperCamelCase__: Tuple , UpperCamelCase__: Tuple , UpperCamelCase__: Any , UpperCamelCase__: Tuple , UpperCamelCase__: List[str] , UpperCamelCase__: int ): lowerCamelCase__ : Union[str, Any] = NystromformerModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) lowerCamelCase__ : List[str] = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Tuple ): lowerCamelCase__ : Any = NystromformerForMaskedLM(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: Any , UpperCamelCase__: str , UpperCamelCase__: Optional[int] , UpperCamelCase__: str , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[Any] ): lowerCamelCase__ : Optional[Any] = NystromformerForQuestionAnswering(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : int = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self: str , UpperCamelCase__: str , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: int , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Optional[Any] ): lowerCamelCase__ : List[Any] = self.num_labels lowerCamelCase__ : Tuple = NystromformerForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : int = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple , UpperCamelCase__: List[Any] ): lowerCamelCase__ : Tuple = self.num_labels lowerCamelCase__ : Dict = NystromformerForTokenClassification(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : int = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self: List[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Dict , UpperCamelCase__: Tuple , UpperCamelCase__: int , UpperCamelCase__: Any , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: int ): lowerCamelCase__ : Optional[Any] = self.num_choices lowerCamelCase__ : Any = NystromformerForMultipleChoice(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCamelCase__ : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ : Optional[int] = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase_ ( self: str ): lowerCamelCase__ : Optional[Any] = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) : Optional[Any] = config_and_inputs lowerCamelCase__ : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _lowercase ( lowercase_ , lowercase_ , unittest.TestCase ): a = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) a = ( { """feature-extraction""": NystromformerModel, """fill-mask""": NystromformerForMaskedLM, """question-answering""": NystromformerForQuestionAnswering, """text-classification""": NystromformerForSequenceClassification, """token-classification""": NystromformerForTokenClassification, """zero-shot""": NystromformerForSequenceClassification, } if is_torch_available() else {} ) a = False a = False def lowerCamelCase_ ( self: List[Any] ): lowerCamelCase__ : Any = NystromformerModelTester(self ) lowerCamelCase__ : Dict = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self: List[str] ): self.config_tester.run_common_tests() def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def lowerCamelCase_ ( self: int ): lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCamelCase__ : Any = type self.model_tester.create_and_check_model(*UpperCamelCase__ ) def lowerCamelCase_ ( self: int ): lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Optional[int] ): lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Optional[Any] ): lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Tuple ): lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ ) def lowerCamelCase_ ( self: Dict ): lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) @slow def lowerCamelCase_ ( self: List[str] ): for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : int = NystromformerModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) @require_torch class _lowercase ( unittest.TestCase ): @slow def lowerCamelCase_ ( self: Union[str, Any] ): lowerCamelCase__ : List[Any] = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" ) lowerCamelCase__ : Tuple = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): lowerCamelCase__ : Optional[Any] = model(UpperCamelCase__ )[0] lowerCamelCase__ : int = torch.Size((1, 6, 768) ) self.assertEqual(output.shape , UpperCamelCase__ ) lowerCamelCase__ : Any = torch.tensor( [[[-0.4_532, -0.0_936, 0.5_137], [-0.2_676, 0.0_628, 0.6_186], [-0.3_629, -0.1_726, 0.4_716]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1e-4 ) ) @slow def lowerCamelCase_ ( self: Any ): lowerCamelCase__ : Optional[Any] = """the [MASK] of Belgium is Brussels""" lowerCamelCase__ : str = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" ) lowerCamelCase__ : List[str] = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" ) lowerCamelCase__ : str = tokenizer(UpperCamelCase__ , return_tensors="""pt""" ) with torch.no_grad(): lowerCamelCase__ : List[str] = model(encoding.input_ids ).logits lowerCamelCase__ : Optional[Any] = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(UpperCamelCase__ ) , """capital""" )
41
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : Tuple = logging.get_logger(__name__) lowerCamelCase : Dict = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Tuple = """cvt""" def __init__(self : int , UpperCamelCase : List[Any]=3 , UpperCamelCase : int=[7, 3, 3] , UpperCamelCase : str=[4, 2, 2] , UpperCamelCase : Dict=[2, 1, 1] , UpperCamelCase : Dict=[64, 192, 384] , UpperCamelCase : Dict=[1, 3, 6] , UpperCamelCase : Dict=[1, 2, 10] , UpperCamelCase : Any=[4.0, 4.0, 4.0] , UpperCamelCase : Optional[int]=[0.0, 0.0, 0.0] , UpperCamelCase : Optional[int]=[0.0, 0.0, 0.0] , UpperCamelCase : int=[0.0, 0.0, 0.1] , UpperCamelCase : Any=[True, True, True] , UpperCamelCase : int=[False, False, True] , UpperCamelCase : Union[str, Any]=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase : Optional[int]=[3, 3, 3] , UpperCamelCase : Tuple=[1, 1, 1] , UpperCamelCase : Any=[2, 2, 2] , UpperCamelCase : Dict=[1, 1, 1] , UpperCamelCase : List[str]=[1, 1, 1] , UpperCamelCase : str=0.02 , UpperCamelCase : int=1E-12 , **UpperCamelCase : Union[str, Any] , ): '''simple docstring''' super().__init__(**UpperCamelCase ) lowercase__ = num_channels lowercase__ = patch_sizes lowercase__ = patch_stride lowercase__ = patch_padding lowercase__ = embed_dim lowercase__ = num_heads lowercase__ = depth lowercase__ = mlp_ratio lowercase__ = attention_drop_rate lowercase__ = drop_rate lowercase__ = drop_path_rate lowercase__ = qkv_bias lowercase__ = cls_token lowercase__ = qkv_projection_method lowercase__ = kernel_qkv lowercase__ = padding_kv lowercase__ = stride_kv lowercase__ = padding_q lowercase__ = stride_q lowercase__ = initializer_range lowercase__ = layer_norm_eps
2
0
'''simple docstring''' import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) lowercase : Any = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='relu') ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation='relu')) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=1_28, activation='relu')) classifier.add(layers.Dense(units=1, activation='sigmoid')) # Compiling the CNN classifier.compile( optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') lowercase : Optional[Any] = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 2_55, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) lowercase : Any = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_55) lowercase : List[Any] = train_datagen.flow_from_directory( 'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) lowercase : List[str] = test_datagen.flow_from_directory( 'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save('cnn.h5') # Part 3 - Making new predictions lowercase : List[str] = tf.keras.preprocessing.image.load_img( 'dataset/single_prediction/image.png', target_size=(64, 64) ) lowercase : Optional[int] = tf.keras.preprocessing.image.img_to_array(test_image) lowercase : str = np.expand_dims(test_image, axis=0) lowercase : List[str] = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: lowercase : Any = 'Normal' if result[0][0] == 1: lowercase : Any = 'Abnormality detected'
3
'''simple docstring''' import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) lowerCamelCase : Any = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='relu') ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation='relu')) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=128, activation='relu')) classifier.add(layers.Dense(units=1, activation='sigmoid')) # Compiling the CNN classifier.compile( optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') lowerCamelCase : Optional[Any] = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) lowerCamelCase : Any = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) lowerCamelCase : List[Any] = train_datagen.flow_from_directory( 'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) lowerCamelCase : List[str] = test_datagen.flow_from_directory( 'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save('cnn.h5') # Part 3 - Making new predictions lowerCamelCase : List[str] = tf.keras.preprocessing.image.load_img( 'dataset/single_prediction/image.png', target_size=(64, 64) ) lowerCamelCase : Optional[int] = tf.keras.preprocessing.image.img_to_array(test_image) lowerCamelCase : str = np.expand_dims(test_image, axis=0) lowerCamelCase : List[str] = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: lowerCamelCase : Any = 'Normal' if result[0][0] == 1: lowerCamelCase : Any = 'Abnormality detected'
2
0
'''simple docstring''' import os import sys import transformers A ='3' print('Python version:', sys.version) print('transformers version:', transformers.__version__) try: import torch print('Torch version:', torch.__version__) print('Cuda available:', torch.cuda.is_available()) print('Cuda version:', torch.version.cuda) print('CuDNN version:', torch.backends.cudnn.version()) print('Number of GPUs available:', torch.cuda.device_count()) print('NCCL version:', torch.cuda.nccl.version()) except ImportError: print('Torch version:', None) try: import deepspeed print('DeepSpeed version:', deepspeed.__version__) except ImportError: print('DeepSpeed version:', None) try: import tensorflow as tf print('TensorFlow version:', tf.__version__) print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU'))) print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU'))) except ImportError: print('TensorFlow version:', None)
34
'''simple docstring''' class __lowerCAmelCase : # Public class to implement a graph '''simple docstring''' def __init__(self : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : list[list[bool]] ): '''simple docstring''' lowercase__ = row lowercase__ = col lowercase__ = graph def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : list[list[bool]] ): '''simple docstring''' return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def UpperCamelCase__ (self : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : list[list[bool]] ): '''simple docstring''' lowercase__ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order lowercase__ = [-1, 0, 1, -1, 1, -1, 0, 1] lowercase__ = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase ) def UpperCamelCase__ (self : Dict ): # And finally, count all islands. '''simple docstring''' lowercase__ = [[False for j in range(self.COL )] for i in range(self.ROW )] lowercase__ = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(UpperCamelCase , UpperCamelCase , UpperCamelCase ) count += 1 return count
2
0
'''simple docstring''' import json import os from pathlib import Path import pytest from datasets.download.download_config import DownloadConfig from datasets.download.download_manager import DownloadManager from datasets.utils.file_utils import hash_url_to_filename UpperCAmelCase = 'http://www.mocksite.com/file1.txt' UpperCAmelCase = '"text": ["foo", "foo"]' UpperCAmelCase = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8' class lowerCAmelCase : lowerCAmelCase_ = 2_0_0 lowerCAmelCase_ = {"""Content-Length""": """100"""} lowerCAmelCase_ = {} def snake_case ( self : Tuple , **__lowercase : int ): """simple docstring""" return [bytes(__lowercase , 'utf-8' )] def __UpperCamelCase ( *lowercase__ : Union[str, Any], **lowercase__ : Optional[Any] ): '''simple docstring''' return MockResponse() @pytest.mark.parametrize('urls_type', [str, list, dict] ) def __UpperCamelCase ( lowercase__ : List[str], lowercase__ : int, lowercase__ : Dict ): '''simple docstring''' import requests monkeypatch.setattr(lowercase__, 'request', lowercase__ ) __lowercase =URL if issubclass(lowercase__, lowercase__ ): __lowercase =url elif issubclass(lowercase__, lowercase__ ): __lowercase =[url] elif issubclass(lowercase__, lowercase__ ): __lowercase ={'train': url} __lowercase ='dummy' __lowercase ='downloads' __lowercase =tmp_path __lowercase =DownloadConfig( cache_dir=os.path.join(lowercase__, lowercase__ ), use_etag=lowercase__, ) __lowercase =DownloadManager(dataset_name=lowercase__, download_config=lowercase__ ) __lowercase =dl_manager.download(lowercase__ ) __lowercase =urls for downloaded_paths in [downloaded_paths]: if isinstance(lowercase__, lowercase__ ): __lowercase =[downloaded_paths] __lowercase =[urls] elif isinstance(lowercase__, lowercase__ ): assert "train" in downloaded_paths.keys() __lowercase =downloaded_paths.values() __lowercase =urls.values() assert downloaded_paths for downloaded_path, input_url in zip(lowercase__, lowercase__ ): assert downloaded_path == dl_manager.downloaded_paths[input_url] __lowercase =Path(lowercase__ ) __lowercase =downloaded_path.parts assert parts[-1] == HASH assert parts[-2] == cache_subdir assert downloaded_path.exists() __lowercase =downloaded_path.read_text() assert content == CONTENT __lowercase =downloaded_path.with_suffix('.json' ) assert metadata_downloaded_path.exists() __lowercase =json.loads(metadata_downloaded_path.read_text() ) assert metadata_content == {"url": URL, "etag": None} @pytest.mark.parametrize('paths_type', [str, list, dict] ) def __UpperCamelCase ( lowercase__ : Dict, lowercase__ : Any, lowercase__ : Dict ): '''simple docstring''' __lowercase =str(lowercase__ ) if issubclass(lowercase__, lowercase__ ): __lowercase =filename elif issubclass(lowercase__, lowercase__ ): __lowercase =[filename] elif issubclass(lowercase__, lowercase__ ): __lowercase ={'train': filename} __lowercase ='dummy' __lowercase =xz_file.parent __lowercase ='extracted' __lowercase =DownloadConfig( cache_dir=lowercase__, use_etag=lowercase__, ) __lowercase =DownloadManager(dataset_name=lowercase__, download_config=lowercase__ ) __lowercase =dl_manager.extract(lowercase__ ) __lowercase =paths for extracted_paths in [extracted_paths]: if isinstance(lowercase__, lowercase__ ): __lowercase =[extracted_paths] __lowercase =[paths] elif isinstance(lowercase__, lowercase__ ): assert "train" in extracted_paths.keys() __lowercase =extracted_paths.values() __lowercase =paths.values() assert extracted_paths for extracted_path, input_path in zip(lowercase__, lowercase__ ): assert extracted_path == dl_manager.extracted_paths[input_path] __lowercase =Path(lowercase__ ) __lowercase =extracted_path.parts assert parts[-1] == hash_url_to_filename(lowercase__, etag=lowercase__ ) assert parts[-2] == extracted_subdir assert extracted_path.exists() __lowercase =extracted_path.read_text() __lowercase =text_file.read_text() assert extracted_file_content == expected_file_content def __UpperCamelCase ( lowercase__ : Any, lowercase__ : List[Any] ): '''simple docstring''' assert path.endswith('.jsonl' ) for num_items, line in enumerate(lowercase__, start=1 ): __lowercase =json.loads(line.decode('utf-8' ) ) assert item.keys() == {"col_1", "col_2", "col_3"} assert num_items == 4 @pytest.mark.parametrize('archive_jsonl', ['tar_jsonl_path', 'zip_jsonl_path'] ) def __UpperCamelCase ( lowercase__ : Optional[Any], lowercase__ : Optional[int] ): '''simple docstring''' __lowercase =request.getfixturevalue(lowercase__ ) __lowercase =DownloadManager() for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowercase__ ), start=1 ): _test_jsonl(lowercase__, lowercase__ ) assert num_jsonl == 2 @pytest.mark.parametrize('archive_nested_jsonl', ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] ) def __UpperCamelCase ( lowercase__ : int, lowercase__ : List[str] ): '''simple docstring''' __lowercase =request.getfixturevalue(lowercase__ ) __lowercase =DownloadManager() for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowercase__ ), start=1 ): for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowercase__ ), start=1 ): _test_jsonl(lowercase__, lowercase__ ) assert num_tar == 1 assert num_jsonl == 2 def __UpperCamelCase ( lowercase__ : Any ): '''simple docstring''' __lowercase =DownloadManager() for num_file, file in enumerate(dl_manager.iter_files(lowercase__ ), start=1 ): assert os.path.basename(lowercase__ ) == ("test.txt" if num_file == 1 else "train.txt") assert num_file == 2
141
'''simple docstring''' import unittest from transformers import DonutProcessor lowerCamelCase : Tuple = 'naver-clova-ix/donut-base' class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = DonutProcessor.from_pretrained(UpperCamelCase ) def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = { '''name''': '''John Doe''', '''age''': '''99''', '''city''': '''Atlanta''', '''state''': '''GA''', '''zip''': '''30301''', '''phone''': '''123-4567''', '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}], } lowercase__ = ( '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>''' '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>''' '''<s_nicknames><s_nickname>Johnny</s_nickname>''' '''<sep/><s_nickname>JD</s_nickname></s_nicknames>''' ) lowercase__ = self.processor.tokenajson(UpperCamelCase ) self.assertDictEqual(UpperCamelCase , UpperCamelCase )
2
0
__snake_case : int =[ (1_0_0_0, 'M'), (9_0_0, 'CM'), (5_0_0, 'D'), (4_0_0, 'CD'), (1_0_0, 'C'), (9_0, 'XC'), (5_0, 'L'), (4_0, 'XL'), (1_0, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I'), ] def lowerCAmelCase__ ( lowerCamelCase_ : str): '''simple docstring''' lowerCAmelCase__ : Tuple = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000} lowerCAmelCase__ : List[Any] = 0 lowerCAmelCase__ : Any = 0 while place < len(lowerCamelCase_): if (place + 1 < len(lowerCamelCase_)) and (vals[roman[place]] < vals[roman[place + 1]]): total += vals[roman[place + 1]] - vals[roman[place]] place += 2 else: total += vals[roman[place]] place += 1 return total def lowerCAmelCase__ ( lowerCamelCase_ : int): '''simple docstring''' lowerCAmelCase__ : Dict = [] for arabic, roman in ROMAN: ((lowerCAmelCase__) , (lowerCAmelCase__)) : int = divmod(lowerCamelCase_ ,lowerCamelCase_) result.append(roman * factor) if number == 0: break return "".join(lowerCamelCase_) if __name__ == "__main__": import doctest doctest.testmod()
129
'''simple docstring''' from __future__ import annotations def _SCREAMING_SNAKE_CASE (A ) -> bool: """simple docstring""" return len(set(A ) ) == len(A ) if __name__ == "__main__": import doctest doctest.testmod()
2
0
"""simple docstring""" import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def lowercase ( _snake_case : Optional[int] , _snake_case : Any=0.999 , _snake_case : str="cosine" , ) ->Optional[Any]: """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(_snake_case : Tuple ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_snake_case : int ): return math.exp(t * -12.0 ) else: raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) __snake_case : Optional[int] = [] for i in range(_snake_case ): __snake_case : Dict = i / num_diffusion_timesteps __snake_case : Dict = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_snake_case ) / alpha_bar_fn(_snake_case ) , _snake_case ) ) return torch.tensor(_snake_case , dtype=torch.floataa ) class _UpperCAmelCase ( lowercase_, lowercase_ ): '''simple docstring''' lowerCamelCase__ =[e.name for e in KarrasDiffusionSchedulers] lowerCamelCase__ =2 @register_to_config def __init__(self , a_ = 10_00 , a_ = 0.0_0085 , a_ = 0.012 , a_ = "linear" , a_ = None , a_ = "epsilon" , a_ = "linspace" , a_ = 0 , ): '''simple docstring''' if trained_betas is not None: __snake_case : Optional[int] = torch.tensor(a_ , dtype=torch.floataa ) elif beta_schedule == "linear": __snake_case : Any = torch.linspace(a_ , a_ , a_ , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. __snake_case : Any = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , a_ , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __snake_case : Tuple = betas_for_alpha_bar(a_ ) else: raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" ) __snake_case : Dict = 1.0 - self.betas __snake_case : Tuple = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(a_ , a_ , a_ ) def SCREAMING_SNAKE_CASE (self , a_ , a_=None ): '''simple docstring''' if schedule_timesteps is None: __snake_case : List[Any] = self.timesteps __snake_case : Dict = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. for image-to-image) if len(self._index_counter ) == 0: __snake_case : List[str] = 1 if len(a_ ) > 1 else 0 else: __snake_case : Union[str, Any] = timestep.cpu().item() if torch.is_tensor(a_ ) else timestep __snake_case : Any = self._index_counter[timestep_int] return indices[pos].item() @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def SCREAMING_SNAKE_CASE (self , a_ , a_ , ): '''simple docstring''' __snake_case : Union[str, Any] = self.index_for_timestep(a_ ) if self.state_in_first_order: __snake_case : List[str] = self.sigmas[step_index] else: __snake_case : Union[str, Any] = self.sigmas_interpol[step_index] __snake_case : Dict = sample / ((sigma**2 + 1) ** 0.5) return sample def SCREAMING_SNAKE_CASE (self , a_ , a_ = None , a_ = None , ): '''simple docstring''' __snake_case : List[str] = num_inference_steps __snake_case : Any = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": __snake_case : List[Any] = np.linspace(0 , num_train_timesteps - 1 , a_ , dtype=a_ )[::-1].copy() elif self.config.timestep_spacing == "leading": __snake_case : Optional[Any] = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __snake_case : Optional[int] = (np.arange(0 , a_ ) * step_ratio).round()[::-1].copy().astype(a_ ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": __snake_case : Any = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __snake_case : List[str] = (np.arange(a_ , 0 , -step_ratio )).round().copy().astype(a_ ) timesteps -= 1 else: raise ValueError( f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" ) __snake_case : Tuple = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) __snake_case : str = torch.from_numpy(np.log(a_ ) ).to(a_ ) __snake_case : Union[str, Any] = np.interp(a_ , np.arange(0 , len(a_ ) ) , a_ ) __snake_case : List[str] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) __snake_case : List[str] = torch.from_numpy(a_ ).to(device=a_ ) # interpolate sigmas __snake_case : Union[str, Any] = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp() __snake_case : List[Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) __snake_case : int = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(a_ ).startswith('''mps''' ): # mps does not support float64 __snake_case : Optional[Any] = torch.from_numpy(a_ ).to(a_ , dtype=torch.floataa ) else: __snake_case : str = torch.from_numpy(a_ ).to(a_ ) # interpolate timesteps __snake_case : List[str] = self.sigma_to_t(a_ ).to(a_ , dtype=timesteps.dtype ) __snake_case : Optional[Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten() __snake_case : Union[str, Any] = torch.cat([timesteps[:1], interleaved_timesteps] ) __snake_case : Dict = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter __snake_case : Optional[Any] = defaultdict(a_ ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' __snake_case : Dict = sigma.log() # get distribution __snake_case : Optional[Any] = log_sigma - self.log_sigmas[:, None] # get sigmas range __snake_case : Optional[int] = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) __snake_case : Any = low_idx + 1 __snake_case : List[Any] = self.log_sigmas[low_idx] __snake_case : List[Any] = self.log_sigmas[high_idx] # interpolate sigmas __snake_case : Union[str, Any] = (low - log_sigma) / (low - high) __snake_case : Dict = w.clamp(0 , 1 ) # transform interpolation to time range __snake_case : str = (1 - w) * low_idx + w * high_idx __snake_case : str = t.view(sigma.shape ) return t @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.sample is None def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ = True , ): '''simple docstring''' __snake_case : Optional[int] = self.index_for_timestep(a_ ) # advance index counter by 1 __snake_case : Dict = timestep.cpu().item() if torch.is_tensor(a_ ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: __snake_case : str = self.sigmas[step_index] __snake_case : Dict = self.sigmas_interpol[step_index + 1] __snake_case : Tuple = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method __snake_case : int = self.sigmas[step_index - 1] __snake_case : Optional[Any] = self.sigmas_interpol[step_index] __snake_case : List[str] = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API __snake_case : int = 0 __snake_case : str = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": __snake_case : List[Any] = sigma_hat if self.state_in_first_order else sigma_interpol __snake_case : Optional[Any] = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": __snake_case : str = sigma_hat if self.state_in_first_order else sigma_interpol __snake_case : Tuple = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError('''prediction_type not implemented yet: sample''' ) else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order __snake_case : Tuple = (sample - pred_original_sample) / sigma_hat # 3. delta timestep __snake_case : Union[str, Any] = sigma_interpol - sigma_hat # store for 2nd order step __snake_case : List[Any] = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order __snake_case : List[str] = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep __snake_case : Any = sigma_next - sigma_hat __snake_case : List[str] = self.sample __snake_case : Optional[Any] = None __snake_case : List[str] = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=a_ ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , ): '''simple docstring''' __snake_case : Tuple = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(a_ ): # mps does not support float64 __snake_case : str = self.timesteps.to(original_samples.device , dtype=torch.floataa ) __snake_case : Optional[int] = timesteps.to(original_samples.device , dtype=torch.floataa ) else: __snake_case : Any = self.timesteps.to(original_samples.device ) __snake_case : List[Any] = timesteps.to(original_samples.device ) __snake_case : Optional[int] = [self.index_for_timestep(a_ , a_ ) for t in timesteps] __snake_case : Dict = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): __snake_case : Tuple = sigma.unsqueeze(-1 ) __snake_case : List[str] = original_samples + noise * sigma return noisy_samples def __len__(self ): '''simple docstring''' return self.config.num_train_timesteps
102
'''simple docstring''' import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: lowerCamelCase : Any = None lowerCamelCase : Dict = logging.get_logger(__name__) lowerCamelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} lowerCamelCase : List[str] = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', }, 'tokenizer_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json', 't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json', 't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json', }, } # TODO(PVP) - this should be removed in Transformers v5 lowerCamelCase : Any = { 't5-small': 512, 't5-base': 512, 't5-large': 512, 't5-3b': 512, 't5-11b': 512, } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : List[str] = VOCAB_FILES_NAMES lowerCAmelCase__ : str = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ : int = ["""input_ids""", """attention_mask"""] lowerCAmelCase__ : Optional[int] = TaTokenizer lowerCAmelCase__ : List[int] = [] def __init__(self : Dict , UpperCamelCase : str=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Any="</s>" , UpperCamelCase : str="<unk>" , UpperCamelCase : List[str]="<pad>" , UpperCamelCase : List[str]=100 , UpperCamelCase : Tuple=None , **UpperCamelCase : List[str] , ): '''simple docstring''' if extra_ids > 0 and additional_special_tokens is None: lowercase__ = [f"<extra_id_{i}>" for i in range(UpperCamelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens lowercase__ = len(set(filter(lambda UpperCamelCase : bool('''extra_id_''' in str(UpperCamelCase ) ) , UpperCamelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids''' ''' tokens''' ) super().__init__( UpperCamelCase , tokenizer_file=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , extra_ids=UpperCamelCase , additional_special_tokens=UpperCamelCase , **UpperCamelCase , ) lowercase__ = vocab_file lowercase__ = False if not self.vocab_file else True lowercase__ = extra_ids @staticmethod def UpperCamelCase__ (UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: lowercase__ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( '''This tokenizer was incorrectly instantiated with a model max length of''' f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this" ''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with''' ''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on''' f" {pretrained_model_name_or_path} automatically truncating your input to" f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences" f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with" ''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please''' ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , UpperCamelCase , ) return max_model_length def UpperCamelCase__ (self : Any , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(UpperCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return lowercase__ = os.path.join( UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ): copyfile(self.vocab_file , UpperCamelCase ) logger.info(f"Copy vocab file to {out_vocab_file}" ) return (out_vocab_file,) def UpperCamelCase__ (self : Any , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowercase__ = token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: lowercase__ = token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowercase__ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' return list( set(filter(lambda UpperCamelCase : bool(re.search(R'''<extra_id_\d+>''' , UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' return [self.convert_tokens_to_ids(UpperCamelCase ) for token in self.get_sentinel_tokens()]
2
0
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging A_ :Optional[int] = logging.get_logger(__name__) A_ :Union[str, Any] = {'vocab_file': 'spiece.model'} A_ :int = { 'vocab_file': { 'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model', } } class __A ( lowercase_ ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<sep>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<cls>" , lowerCamelCase__="<mask>" , lowerCamelCase__=["<eop>", "<eod>"] , lowerCamelCase__ = None , **lowerCamelCase__ , ): """simple docstring""" __UpperCamelCase : List[Any] =AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token __UpperCamelCase : List[Any] ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=lowerCamelCase__ , remove_space=lowerCamelCase__ , keep_accents=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , ) __UpperCamelCase : int =3 __UpperCamelCase : int =do_lower_case __UpperCamelCase : str =remove_space __UpperCamelCase : Optional[int] =keep_accents __UpperCamelCase : Optional[int] =vocab_file __UpperCamelCase : Optional[int] =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCamelCase__ ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( 'You need to install jieba to use CpmTokenizer or CpmTokenizerFast. ' 'See https://pypi.org/project/jieba/ for installation.' ) __UpperCamelCase : Optional[Any] =jieba __UpperCamelCase : Tuple =str.maketrans(' \n' , '\u2582\u2583' ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def __lowercase ( self ): """simple docstring""" return len(self.sp_model ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : int ={self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): """simple docstring""" __UpperCamelCase : Optional[Any] =self.__dict__.copy() __UpperCamelCase : List[str] =None return state def __setstate__( self , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase : int =d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __UpperCamelCase : List[str] ={} __UpperCamelCase : Tuple =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __lowercase ( self , lowerCamelCase__ ): """simple docstring""" if self.remove_space: __UpperCamelCase : List[Any] =' '.join(inputs.strip().split() ) else: __UpperCamelCase : Union[str, Any] =inputs __UpperCamelCase : Any =outputs.replace('``' , '"' ).replace('\'\'' , '"' ) if not self.keep_accents: __UpperCamelCase : Optional[Any] =unicodedata.normalize('NFKD' , lowerCamelCase__ ) __UpperCamelCase : List[Any] =''.join([c for c in outputs if not unicodedata.combining(lowerCamelCase__ )] ) if self.do_lower_case: __UpperCamelCase : int =outputs.lower() return outputs def __lowercase ( self , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase : Union[str, Any] =self.preprocess_text(lowerCamelCase__ ) __UpperCamelCase : Optional[int] =self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ ) __UpperCamelCase : Dict =[] for piece in pieces: if len(lowerCamelCase__ ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit(): __UpperCamelCase : Tuple =self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCamelCase__ , '' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: __UpperCamelCase : Any =cur_pieces[1:] else: __UpperCamelCase : Optional[Any] =cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(lowerCamelCase__ ) else: new_pieces.append(lowerCamelCase__ ) return new_pieces def __lowercase ( self , lowerCamelCase__ ): """simple docstring""" return self.sp_model.PieceToId(lowerCamelCase__ ) def __lowercase ( self , lowerCamelCase__ ): """simple docstring""" return self.sp_model.IdToPiece(lowerCamelCase__ ) def __lowercase ( self , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase : Optional[int] =''.join(lowerCamelCase__ ).replace(lowerCamelCase__ , ' ' ).strip() return out_string def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None ): """simple docstring""" __UpperCamelCase : Union[str, Any] =[self.sep_token_id] __UpperCamelCase : Optional[Any] =[self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ ) if token_ids_a is not None: return ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1, 1] return ([0] * len(lowerCamelCase__ )) + [1, 1] def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None ): """simple docstring""" __UpperCamelCase : Optional[Any] =[self.sep_token_id] __UpperCamelCase : str =[2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None ): """simple docstring""" if not os.path.isdir(lowerCamelCase__ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __UpperCamelCase : Tuple =os.path.join( lowerCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCamelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCamelCase__ , 'wb' ) as fi: __UpperCamelCase : int =self.sp_model.serialized_model_proto() fi.write(lowerCamelCase__ ) return (out_vocab_file,) def __lowercase ( self , *lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" __UpperCamelCase : str =super()._decode(*lowerCamelCase__ , **lowerCamelCase__ ) __UpperCamelCase : str =text.replace(' ' , '' ).replace('\u2582' , ' ' ).replace('\u2583' , '\n' ) return text
71
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class __lowerCAmelCase (lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : Dict = ShapEImgaImgPipeline lowerCAmelCase__ : List[str] = ["""image"""] lowerCAmelCase__ : Any = ["""image"""] lowerCAmelCase__ : Any = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] lowerCAmelCase__ : Tuple = False @property def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' return 32 @property def UpperCamelCase__ (self : str ): '''simple docstring''' return 32 @property def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' return self.time_input_dim * 4 @property def UpperCamelCase__ (self : int ): '''simple docstring''' return 8 @property def UpperCamelCase__ (self : Any ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) lowercase__ = CLIPVisionModel(UpperCamelCase ) return model @property def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' lowercase__ = CLIPImageProcessor( crop_size=224 , do_center_crop=UpperCamelCase , do_normalize=UpperCamelCase , do_resize=UpperCamelCase , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , ) return image_processor @property def UpperCamelCase__ (self : str ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ = { '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } lowercase__ = PriorTransformer(**UpperCamelCase ) return model @property def UpperCamelCase__ (self : int ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ = { '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } lowercase__ = ShapERenderer(**UpperCamelCase ) return model def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.dummy_prior lowercase__ = self.dummy_image_encoder lowercase__ = self.dummy_image_processor lowercase__ = self.dummy_renderer lowercase__ = HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=UpperCamelCase , clip_sample=UpperCamelCase , clip_sample_range=1.0 , ) lowercase__ = { '''prior''': prior, '''image_encoder''': image_encoder, '''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str=0 ): '''simple docstring''' lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) if str(UpperCamelCase ).startswith('''mps''' ): lowercase__ = torch.manual_seed(UpperCamelCase ) else: lowercase__ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) lowercase__ = { '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = '''cpu''' lowercase__ = self.get_dummy_components() lowercase__ = self.pipeline_class(**UpperCamelCase ) lowercase__ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = pipe(**self.get_dummy_inputs(UpperCamelCase ) ) lowercase__ = output.images[0] lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) lowercase__ = np.array( [ 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = torch_device == '''cpu''' lowercase__ = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=UpperCamelCase , relax_max_difference=UpperCamelCase , ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = self.get_dummy_components() lowercase__ = self.pipeline_class(**UpperCamelCase ) lowercase__ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = 1 lowercase__ = 2 lowercase__ = self.get_dummy_inputs(UpperCamelCase ) for key in inputs.keys(): if key in self.batch_params: lowercase__ = batch_size * [inputs[key]] lowercase__ = pipe(**UpperCamelCase , num_images_per_prompt=UpperCamelCase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) lowercase__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) lowercase__ = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) lowercase__ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 ) lowercase__ = pipe( UpperCamelCase , generator=UpperCamelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
2
0
'''simple docstring''' import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers _SCREAMING_SNAKE_CASE : str = 'python tqdm regex requests packaging filelock numpy tokenizers'.split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append("dataclasses") if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append("importlib_metadata") for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") def UpperCamelCase_( snake_case : Any , snake_case : Tuple=None ): '''simple docstring''' require_version(deps[pkg] , snake_case )
85
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCamelCase : str = { 'configuration_rag': ['RagConfig'], 'retrieval_rag': ['RagRetriever'], 'tokenization_rag': ['RagTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Dict = [ 'RagModel', 'RagPreTrainedModel', 'RagSequenceForGeneration', 'RagTokenForGeneration', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Optional[int] = [ 'TFRagModel', 'TFRagPreTrainedModel', 'TFRagSequenceForGeneration', 'TFRagTokenForGeneration', ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys lowerCamelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
2
0
'''simple docstring''' from __future__ import annotations import math from collections.abc import Callable def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = 100 , ): '''simple docstring''' A : str = x_start A : Optional[Any] = fnc(snake_case__ ) A : List[str] = 0.0 for _ in range(snake_case__ ): # Approximates curve as a sequence of linear lines and sums their length A : Any = (x_end - x_start) / steps + xa A : Dict = fnc(snake_case__ ) length += math.hypot(xa - xa , fxa - fxa ) # Increment step A : Optional[Any] = xa A : Tuple = fxa return length if __name__ == "__main__": def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' return math.sin(10 * x ) print('f(x) = sin(10 * x)') print('The length of the curve from x = -10 to x = 10 is:') lowercase : int = 10 while i <= 10_00_00: print(f'''With {i} steps: {line_length(f, -10, 10, i)}''') i *= 10
3
'''simple docstring''' class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Any = None A : Optional[Any] = None A : Tuple = graph self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Dict = len(SCREAMING_SNAKE_CASE ) A : Optional[Any] = None def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" if sources is int: A : Dict = [sources] if sinks is int: A : str = [sinks] if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0: return A : Optional[int] = sources[0] A : Union[str, Any] = sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1: A : Optional[int] = 0 for i in sources: max_input_flow += sum(self.graph[i] ) A : Dict = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: A : Dict = max_input_flow A : Tuple = 0 A : Tuple = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: A : Optional[Any] = max_input_flow A : Optional[Any] = size - 1 def __lowerCAmelCase ( self ) -> Any: """simple docstring""" if self.maximum_flow_algorithm is None: raise Exception('''You need to set maximum flow algorithm before.''' ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : List[Any] = algorithm(self ) class A : def __init__( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Union[str, Any] = flow_network A : Optional[Any] = flow_network.verticesCount A : Tuple = flow_network.sourceIndex A : Dict = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that A : str = flow_network.graph A : Optional[Any] = False def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" if not self.executed: self._algorithm() A : Optional[int] = True def __lowerCAmelCase ( self ) -> Any: """simple docstring""" pass class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE ) # use this to save your result A : List[str] = -1 def __lowerCAmelCase ( self ) -> str: """simple docstring""" if not self.executed: raise Exception('''You should execute algorithm before using its result!''' ) return self.maximum_flow class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE ) A : Optional[Any] = [[0] * self.verticies_count for i in range(self.verticies_count )] A : Union[str, Any] = [0] * self.verticies_count A : List[Any] = [0] * self.verticies_count def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Tuple = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule A : Optional[Any] = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list A : Union[str, Any] = 0 while i < len(SCREAMING_SNAKE_CASE ): A : str = vertices_list[i] A : List[str] = self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) ) A : int = 0 else: i += 1 A : Optional[Any] = sum(self.preflow[self.source_index] ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.relabel(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Dict = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : Dict = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): A : Dict = self.heights[to_index] if min_height is not None: A : Dict = min_height + 1 if __name__ == "__main__": lowercase : Optional[int] = [0] lowercase : List[Any] = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] lowercase : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network lowercase : List[str] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate lowercase : List[str] = flow_network.find_maximum_flow() print(f'''maximum flow is {maximum_flow}''')
3
1
'''simple docstring''' import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging lowercase : Any = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = '''linear''' __magic_name__ = '''cosine''' __magic_name__ = '''cosine_with_restarts''' __magic_name__ = '''polynomial''' __magic_name__ = '''constant''' __magic_name__ = '''constant_with_warmup''' __magic_name__ = '''piecewise_constant''' def lowerCAmelCase_ ( snake_case__ , snake_case__ = -1 ): '''simple docstring''' return LambdaLR(snake_case__ , lambda snake_case__ : 1 , last_epoch=snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ = -1 ): '''simple docstring''' def lr_lambda(snake_case__ ): if current_step < num_warmup_steps: return float(snake_case__ ) / float(max(1.0 , snake_case__ ) ) return 1.0 return LambdaLR(snake_case__ , snake_case__ , last_epoch=snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ = -1 ): '''simple docstring''' A : List[Any] = {} A : Tuple = step_rules.split(''',''' ) for rule_str in rule_list[:-1]: A, A : List[Any] = rule_str.split(''':''' ) A : str = int(snake_case__ ) A : List[Any] = float(snake_case__ ) A : Any = value A : Optional[Any] = float(rule_list[-1] ) def create_rules_function(snake_case__ , snake_case__ ): def rule_func(snake_case__ ) -> float: A : Optional[int] = sorted(rules_dict.keys() ) for i, sorted_step in enumerate(snake_case__ ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func A : Any = create_rules_function(snake_case__ , snake_case__ ) return LambdaLR(snake_case__ , snake_case__ , last_epoch=snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=-1 ): '''simple docstring''' def lr_lambda(snake_case__ ): if current_step < num_warmup_steps: return float(snake_case__ ) / float(max(1 , snake_case__ ) ) return max( 0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) ) return LambdaLR(snake_case__ , snake_case__ , snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = 0.5 , snake_case__ = -1 ): '''simple docstring''' def lr_lambda(snake_case__ ): if current_step < num_warmup_steps: return float(snake_case__ ) / float(max(1 , snake_case__ ) ) A : Union[str, Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(snake_case__ ) * 2.0 * progress )) ) return LambdaLR(snake_case__ , snake_case__ , snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = 1 , snake_case__ = -1 ): '''simple docstring''' def lr_lambda(snake_case__ ): if current_step < num_warmup_steps: return float(snake_case__ ) / float(max(1 , snake_case__ ) ) A : List[Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(snake_case__ ) * progress) % 1.0) )) ) return LambdaLR(snake_case__ , snake_case__ , snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=1E-7 , snake_case__=1.0 , snake_case__=-1 ): '''simple docstring''' A : Optional[Any] = optimizer.defaults['''lr'''] if not (lr_init > lr_end): raise ValueError(F'lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})' ) def lr_lambda(snake_case__ ): if current_step < num_warmup_steps: return float(snake_case__ ) / float(max(1 , snake_case__ ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: A : Dict = lr_init - lr_end A : Tuple = num_training_steps - num_warmup_steps A : str = 1 - (current_step - num_warmup_steps) / decay_steps A : Tuple = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(snake_case__ , snake_case__ , snake_case__ ) lowercase : List[str] = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = 1 , snake_case__ = 1.0 , snake_case__ = -1 , ): '''simple docstring''' A : str = SchedulerType(snake_case__ ) A : Dict = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(snake_case__ , last_epoch=snake_case__ ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(snake_case__ , step_rules=snake_case__ , last_epoch=snake_case__ ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(F'{name} requires `num_warmup_steps`, please provide that argument.' ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(snake_case__ , num_warmup_steps=snake_case__ , last_epoch=snake_case__ ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(F'{name} requires `num_training_steps`, please provide that argument.' ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( snake_case__ , num_warmup_steps=snake_case__ , num_training_steps=snake_case__ , num_cycles=snake_case__ , last_epoch=snake_case__ , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( snake_case__ , num_warmup_steps=snake_case__ , num_training_steps=snake_case__ , power=snake_case__ , last_epoch=snake_case__ , ) return schedule_func( snake_case__ , num_warmup_steps=snake_case__ , num_training_steps=snake_case__ , last_epoch=snake_case__ )
3
'''simple docstring''' def lowerCAmelCase_ ( snake_case__ = 10 ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ) or n < 0: raise ValueError('''Invalid input''' ) A : List[str] = 10**n A : Tuple = 2_8433 * (pow(2 , 783_0457 , snake_case__ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(f'''{solution(10) = }''')
3
1
'''simple docstring''' def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' while b: A, A : List[Any] = b, a % b return a def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' return a if b == 0 else euclidean_gcd_recursive(snake_case__ , a % b ) def lowerCAmelCase_ ( ): '''simple docstring''' print(F'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' ) print(F'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' ) print(F'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' ) print(F'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' ) print(F'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' ) print(F'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' ) print(F'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' ) print(F'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' ) print(F'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' ) print(F'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' ) if __name__ == "__main__": main()
3
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging lowercase : List[str] = logging.get_logger(__name__) lowercase : str = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class A ( __snake_case ): __magic_name__ = '''gpt_neo''' __magic_name__ = ['''past_key_values'''] __magic_name__ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , SCREAMING_SNAKE_CASE=50257 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=24 , SCREAMING_SNAKE_CASE=[[["global", "local"], 12]] , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=50256 , SCREAMING_SNAKE_CASE=50256 , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" A : Union[str, Any] = vocab_size A : Optional[Any] = max_position_embeddings A : Dict = hidden_size A : Optional[Any] = num_layers A : Tuple = num_heads A : int = intermediate_size A : Optional[Any] = window_size A : List[Any] = activation_function A : Union[str, Any] = resid_dropout A : Any = embed_dropout A : List[Any] = attention_dropout A : str = classifier_dropout A : List[Any] = layer_norm_epsilon A : str = initializer_range A : List[str] = use_cache A : Optional[int] = bos_token_id A : List[Any] = eos_token_id A : int = attention_types A : int = self.expand_attention_types_params(SCREAMING_SNAKE_CASE ) if len(self.attention_layers ) != self.num_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.attention_layers)` == `config.num_layers` ''' F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, ' F'`config.num_layers = {self.num_layers}`. ' '''`config.attention_layers` is prepared using `config.attention_types`. ''' '''Please verify the value of `config.attention_types` argument.''' ) super().__init__(bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : List[str] = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' import torch A : Tuple = input.size() A : Union[str, Any] = len(snake_case__ ) A : List[str] = shape[dimension] A : Union[str, Any] = torch.arange(0 , snake_case__ , snake_case__ ) A : List[str] = torch.div(sizedim - size , snake_case__ , rounding_mode='''floor''' ) + 1 A : Optional[int] = torch.arange(snake_case__ ) + low_indices[:min_length][:, None] A : str = [slice(snake_case__ )] * rank A : List[Any] = indices A : Union[str, Any] = input[s] A : List[str] = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch A : List[str] = torch.arange(1 , snake_case__ ) A : Optional[int] = torch.remainder(snake_case__ , snake_case__ ) A : Optional[int] = remainders == 0 A : Optional[Any] = candidates[divisor_indices] A : Optional[int] = torch.max(snake_case__ ) return largest_divisor, torch.div(snake_case__ , snake_case__ , rounding_mode='''floor''' ) class A ( __snake_case ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" A : Tuple = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE , direction='''inputs''' ) A : Optional[Any] = {0: '''batch''', 1: '''past_sequence + sequence'''} else: A : Dict = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return self._config.num_heads def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]: """simple docstring""" A : List[str] = super(SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs( SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE ) # We need to order the input in the way they appears in the forward() A : Any = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch A, A : Dict = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values A : str = seqlen + 2 A : List[Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) A : Any = [ (torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers ) ] A : str = common_inputs['''attention_mask'''] if self.use_past: A : Optional[int] = ordered_inputs['''attention_mask'''].dtype A : List[str] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )] , dim=1 ) return ordered_inputs @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return 13
3
1
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging lowercase : Optional[int] = logging.get_logger(__name__) lowercase : Optional[int] = { 'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class A ( __snake_case ): __magic_name__ = '''gptj''' __magic_name__ = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self , SCREAMING_SNAKE_CASE=50400 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=4096 , SCREAMING_SNAKE_CASE=28 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=50256 , SCREAMING_SNAKE_CASE=50256 , SCREAMING_SNAKE_CASE=False , **SCREAMING_SNAKE_CASE , ) -> Optional[Any]: """simple docstring""" A : int = vocab_size A : Tuple = n_positions A : Optional[int] = n_embd A : List[Any] = n_layer A : Optional[Any] = n_head A : Tuple = n_inner A : Tuple = rotary_dim A : Tuple = activation_function A : Optional[int] = resid_pdrop A : Dict = embd_pdrop A : Tuple = attn_pdrop A : Tuple = layer_norm_epsilon A : Any = initializer_range A : Dict = use_cache A : Tuple = bos_token_id A : Any = eos_token_id super().__init__( bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , tie_word_embeddings=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = "default" , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , ) -> Optional[Any]: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE , task=SCREAMING_SNAKE_CASE , patching_specs=SCREAMING_SNAKE_CASE , use_past=SCREAMING_SNAKE_CASE ) if not getattr(self._config , '''pad_token_id''' , SCREAMING_SNAKE_CASE ): # TODO: how to do that better? A : str = 0 @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" A : str = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE , direction='''inputs''' ) A : Union[str, Any] = {0: '''batch''', 1: '''past_sequence + sequence'''} else: A : Optional[Any] = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return self._config.n_layer @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return self._config.n_head def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]: """simple docstring""" A : Optional[int] = super(SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs( SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE ) # We need to order the input in the way they appears in the forward() A : Any = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch A, A : Tuple = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values A : Dict = seqlen + 2 A : Optional[Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) A : Optional[Any] = [ (torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers ) ] A : Any = common_inputs['''attention_mask'''] if self.use_past: A : List[str] = ordered_inputs['''attention_mask'''].dtype A : int = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )] , dim=1 ) return ordered_inputs @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return 13
3
'''simple docstring''' import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Union[str, Any] = [] A : Union[str, Any] = [] for i in range(self.num_layers ): A : Any = self.in_channels if i == 0 else self.out_channels A : Optional[Any] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Optional[int] = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnets A : Union[str, Any] = attentions if self.add_downsample: A : int = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Union[str, Any]: """simple docstring""" A : Optional[Any] = () for resnet, attn in zip(self.resnets , self.attentions ): A : int = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Dict = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[Any] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Optional[Any] = [] for i in range(self.num_layers ): A : Optional[Any] = self.in_channels if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets if self.add_downsample: A : Dict = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]: """simple docstring""" A : str = () for resnet in self.resnets: A : Optional[int] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[int] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[Any] = [] A : Optional[int] = [] for i in range(self.num_layers ): A : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : Dict = self.prev_output_channel if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : int = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets A : Optional[Any] = attentions if self.add_upsample: A : Optional[int] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[int]: """simple docstring""" for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states A : List[str] = res_hidden_states_tuple[-1] A : int = res_hidden_states_tuple[:-1] A : List[str] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Tuple = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : Dict = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : int = [] for i in range(self.num_layers ): A : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : List[str] = self.prev_output_channel if i == 0 else self.out_channels A : str = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[Any] = resnets if self.add_upsample: A : Optional[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Tuple: """simple docstring""" for resnet in self.resnets: # pop res hidden states A : Optional[int] = res_hidden_states_tuple[-1] A : Optional[Any] = res_hidden_states_tuple[:-1] A : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Optional[Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : List[str] = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : str = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] A : List[Any] = [] for _ in range(self.num_layers ): A : int = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[str] = resnets A : List[str] = attentions def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Dict: """simple docstring""" A : Optional[Any] = self.resnets[0](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): A : Optional[int] = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) return hidden_states
3
1
'''simple docstring''' import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( '''files''' , [ ['''full:README.md''', '''dataset_infos.json'''], ['''empty:README.md''', '''dataset_infos.json'''], ['''dataset_infos.json'''], ['''full:README.md'''], ] , ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Union[str, Any] = tmp_path_factory.mktemp('''dset_infos_dir''' ) if "full:README.md" in files: with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f: f.write('''---\ndataset_info:\n dataset_size: 42\n---''' ) if "empty:README.md" in files: with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f: f.write('''''' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / '''dataset_infos.json''' , '''w''' ) as f: f.write('''{"default": {"dataset_size": 42}}''' ) A : Optional[int] = DatasetInfosDict.from_directory(snake_case__ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( '''dataset_info''' , [ DatasetInfo(), DatasetInfo( description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ), ] , ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Optional[Any] = str(snake_case__ ) dataset_info.write_to_directory(snake_case__ ) A : Optional[Any] = DatasetInfo.from_directory(snake_case__ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(snake_case__ , '''dataset_info.json''' ) ) def lowerCAmelCase_ ( ): '''simple docstring''' A : str = DatasetInfo( description='''foo''' , citation='''bar''' , homepage='''https://foo.bar''' , license='''CC0''' , features=Features({'''a''': Value('''int32''' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train''', '''num_examples''': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) A : str = dataset_info._to_yaml_dict() assert sorted(snake_case__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) A : Optional[Any] = yaml.safe_dump(snake_case__ ) A : Optional[int] = yaml.safe_load(snake_case__ ) assert dataset_info_yaml_dict == reloaded def lowerCAmelCase_ ( ): '''simple docstring''' A : str = DatasetInfo() A : Tuple = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( '''dataset_infos_dict''' , [ DatasetInfosDict(), DatasetInfosDict({'''default''': DatasetInfo()} ), DatasetInfosDict({'''my_config_name''': DatasetInfo()} ), DatasetInfosDict( { '''default''': DatasetInfo( description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ) } ), DatasetInfosDict( { '''v1''': DatasetInfo(dataset_size=42 ), '''v2''': DatasetInfo(dataset_size=1337 ), } ), ] , ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Tuple = str(snake_case__ ) dataset_infos_dict.write_to_directory(snake_case__ ) A : Tuple = DatasetInfosDict.from_directory(snake_case__ ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): A : Dict = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml A : List[str] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(snake_case__ , '''README.md''' ) )
3
'''simple docstring''' import os def lowerCAmelCase_ ( ): '''simple docstring''' A : List[Any] = os.path.join(os.path.dirname(snake_case__ ) , '''num.txt''' ) with open(snake_case__ ) as file_hand: return str(sum(int(snake_case__ ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
3
1
'''simple docstring''' import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging lowercase : Optional[int] = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Union[str, Any] = R'''\w+[.]\d+''' A : Optional[int] = re.findall(snake_case__ , snake_case__ ) for pat in pats: A : List[Any] = key.replace(snake_case__ , '''_'''.join(pat.split('''.''' ) ) ) return key def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : Optional[Any] = pt_tuple_key[:-1] + ('''scale''',) if ( any('''norm''' in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): A : Tuple = pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: A : int = pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: A : Dict = pt_tuple_key[:-1] + ('''embedding''',) return renamed_pt_tuple_key, pt_tensor # conv layer A : str = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: A : Dict = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer A : List[str] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight": A : List[str] = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight A : List[str] = pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias A : Any = pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__=42 ): '''simple docstring''' A : Dict = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params A : Optional[int] = flax_model.init_weights(PRNGKey(snake_case__ ) ) A : Optional[Any] = flatten_dict(snake_case__ ) A : int = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : Any = rename_key(snake_case__ ) A : Dict = tuple(renamed_pt_key.split('''.''' ) ) # Correctly rename weight parameters A, A : Optional[Any] = rename_key_and_reshape_tensor(snake_case__ , snake_case__ , snake_case__ ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) # also add unexpected weight so that warning is thrown A : Union[str, Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ )
3
'''simple docstring''' import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] ) @pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , '''IN_MEMORY_MAX_SIZE''' , snake_case__ ) A : Dict = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: A : Dict = dataset_size < in_memory_max_size else: A : Tuple = False A : int = is_small_dataset(snake_case__ ) assert result == expected
3
1
'''simple docstring''' import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin lowercase : str = get_tests_dir('fixtures/test_sentencepiece.model') lowercase : str = get_tests_dir('fixtures/test_sentencepiece_bpe.model') lowercase : str = 'pt' if is_torch_available() else 'tf' @require_sentencepiece @require_tokenizers class A ( __snake_case , unittest.TestCase ): __magic_name__ = CamembertTokenizer __magic_name__ = CamembertTokenizerFast __magic_name__ = True __magic_name__ = True def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing A : List[str] = CamembertTokenizer(SCREAMING_SNAKE_CASE ) tokenizer.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Optional[Any] = '''<pad>''' A : List[str] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>NOTUSED''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 1004 ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1005 ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Tuple = CamembertTokenizer(SCREAMING_SNAKE_CASE ) tokenizer.save_pretrained(self.tmpdirname ) A : List[Any] = CamembertTokenizerFast.from_pretrained(self.tmpdirname ) A : List[Any] = '''I was born in 92000, and this is falsé.''' A : Union[str, Any] = tokenizer.encode(SCREAMING_SNAKE_CASE ) A : List[Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Tuple = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) A : Any = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # <unk> tokens are not the same for `rust` than for `slow`. # Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) A : Any = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ) A : Tuple = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" if not self.test_rust_tokenizer: return A : int = self.get_tokenizer() A : Optional[Any] = self.get_rust_tokenizer() A : Dict = '''I was born in 92000, and this is falsé.''' A : List[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) A : Optional[Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : int = self.get_rust_tokenizer() A : List[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE ) A : int = rust_tokenizer.encode(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Tuple = {'''input_ids''': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # camembert is a french model. So we also use french texts. A : Tuple = [ '''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ''' '''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''', '''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ''' '''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ''' '''telles que la traduction et la synthèse de texte.''', ] self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE , model_name='''camembert-base''' , revision='''3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf''' , sequences=SCREAMING_SNAKE_CASE , )
3
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class A ( __snake_case ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
3
1
'''simple docstring''' import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer A : Union[str, Any] = flax_key_tuple[:-1] + ('''weight''',) A : Tuple = torch.permute(snake_case__ , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ): # linear layer A : Any = flax_key_tuple[:-1] + ('''weight''',) A : Union[str, Any] = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: A : int = flax_key_tuple[:-1] + ('''weight''',) return flax_key_tuple, flax_tensor def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if "metadata" in layer: A : Union[str, Any] = layer.split('''metadata''' ) A : List[Any] = ''''''.join(split_layer[0] )[:-1] A : int = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )] elif "kvstore" in layer: A : Any = layer.split('''kvstore''' ) A : List[Any] = ''''''.join(split_layer[0] )[:-1] A : Optional[Any] = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )] else: A : Union[str, Any] = layer.split('''/''' ) A : Optional[int] = '''/'''.join(split_layer[:-1] ) A : Optional[Any] = (split_layer[-1],) if "kvstore/path" in layer: A : int = F'{switch_checkpoint_path}/{checkpoint_info[layer]}' elif "kvstore/driver" in layer: A : int = '''file''' else: A : Any = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Optional[int] = rename_keys(snake_case__ ) A : Tuple = {} for k, v in current_block.items(): A : Tuple = v A : List[str] = new_current_block torch.save(snake_case__ , snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = WEIGHTS_NAME ): '''simple docstring''' A : Dict = convert_file_size_to_int(snake_case__ ) A : str = [] A : str = {} A : Any = 0 A : List[str] = 0 os.makedirs(snake_case__ , exist_ok=snake_case__ ) with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp: A : Tuple = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target'''] A : List[Any] = flatten_dict(snake_case__ , sep='''/''' ) A : List[str] = {} for layer in checkpoint_info.keys(): A, A, A : Tuple = get_key_and_tensorstore_dict( snake_case__ , snake_case__ , snake_case__ ) if curr_real_layer_name in all_layers: A : List[str] = content else: A : Optional[Any] = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file A : Any = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() A : List[Any] = torch.tensor(snake_case__ ) A : int = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts A, A : int = rename_base_flax_keys(tuple(key.split('''/''' ) ) , snake_case__ ) A : Union[str, Any] = '''/'''.join(snake_case__ ) # If this weight is going to tip up over the maximal size, we split. if current_block_size + weight_size > max_shard_size: A : List[Any] = os.path.join( snake_case__ , weights_name.replace('''.bin''' , F'-{len(snake_case__ )+1:05d}-of-???.bin' ) ) rename_and_save_block(snake_case__ , snake_case__ ) sharded_state_dicts.append(current_block.keys() ) del current_block A : Dict = {} A : List[Any] = 0 A : List[Any] = raw_weights.to(getattr(snake_case__ , snake_case__ ) ) current_block_size += weight_size total_size += weight_size # Add the last block A : Optional[int] = os.path.join(snake_case__ , weights_name.replace('''.bin''' , F'-{len(snake_case__ )+1:05d}-of-???.bin' ) ) rename_and_save_block(snake_case__ , snake_case__ ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(snake_case__ ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index A : Union[str, Any] = {} A : List[str] = {} for idx, shard in enumerate(snake_case__ ): A : int = weights_name.replace( '''.bin''' , F'-{idx+1:05d}-of-{len(snake_case__ ):05d}.bin' ) # len(sharded_state_dicts):05d} A : Union[str, Any] = os.path.join(snake_case__ , weights_name.replace('''.bin''' , F'-{idx+1:05d}-of-???.bin' ) ) os.rename(snake_case__ , os.path.join(snake_case__ , snake_case__ ) ) A : str = shard for key in shard: A : Tuple = shard_file # Add the metadata A : Tuple = {'''total_size''': total_size} A : Optional[int] = {'''metadata''': metadata, '''weight_map''': weight_map} with open(os.path.join(snake_case__ , snake_case__ ) , '''w''' , encoding='''utf-8''' ) as f: A : Union[str, Any] = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '''\n''' f.write(snake_case__ ) return metadata, index if __name__ == "__main__": lowercase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--switch_t5x_checkpoint_path', default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600', type=str, required=False, help='Path to a directory containing a folder per layer. Follows the original Google format.', ) parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size') parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model') parser.add_argument( '--pytorch_dump_folder_path', default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted', type=str, required=False, help='Path to the output pytorch model.', ) lowercase : List[str] = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def lowerCAmelCase_ ( ): '''simple docstring''' from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer A : Optional[Any] = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' ) config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' ) A : Any = SwitchTransformersForConditionalGeneration.from_pretrained( '''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''' ) A : Any = TaTokenizer.from_pretrained('''t5-small''' ) A : Union[str, Any] = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''' A : List[Any] = tokenizer(snake_case__ , return_tensors='''pt''' ).input_ids A : Optional[Any] = model.generate(snake_case__ , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
3
'''simple docstring''' from scipy.stats import pearsonr import datasets lowercase : Optional[int] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n' lowercase : Optional[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n' lowercase : str = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: """simple docstring""" if return_pvalue: A : Union[str, Any] = pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )}
3
1
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if ( (cp >= 0X4E00 and cp <= 0X9FFF) or (cp >= 0X3400 and cp <= 0X4DBF) # or (cp >= 0X20000 and cp <= 0X2A6DF) # or (cp >= 0X2A700 and cp <= 0X2B73F) # or (cp >= 0X2B740 and cp <= 0X2B81F) # or (cp >= 0X2B820 and cp <= 0X2CEAF) # or (cp >= 0XF900 and cp <= 0XFAFF) or (cp >= 0X2F800 and cp <= 0X2FA1F) # ): # return True return False def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' for char in word: A : Optional[int] = ord(snake_case__ ) if not _is_chinese_char(snake_case__ ): return 0 return 1 def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Optional[Any] = set() for token in tokens: A : Tuple = len(snake_case__ ) > 1 and is_chinese(snake_case__ ) if chinese_word: word_set.add(snake_case__ ) A : List[str] = list(snake_case__ ) return word_list def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' if not chinese_word_set: return bert_tokens A : Tuple = max([len(snake_case__ ) for w in chinese_word_set] ) A : List[str] = bert_tokens A, A : List[str] = 0, len(snake_case__ ) while start < end: A : str = True if is_chinese(bert_word[start] ): A : List[str] = min(end - start , snake_case__ ) for i in range(snake_case__ , 1 , -1 ): A : Union[str, Any] = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): A : List[str] = '''##''' + bert_word[j] A : Optional[int] = start + i A : Tuple = False break if single_word: start += 1 return bert_word def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : Tuple = [] for i in range(0 , len(snake_case__ ) , 100 ): A : Optional[Any] = ltp_tokenizer.seg(lines[i : i + 100] )[0] A : List[str] = [get_chinese_word(snake_case__ ) for r in res] ltp_res.extend(snake_case__ ) assert len(snake_case__ ) == len(snake_case__ ) A : Any = [] for i in range(0 , len(snake_case__ ) , 100 ): A : Optional[int] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=snake_case__ , truncation=snake_case__ , max_length=512 ) bert_res.extend(res['''input_ids'''] ) assert len(snake_case__ ) == len(snake_case__ ) A : List[str] = [] for input_ids, chinese_word in zip(snake_case__ , snake_case__ ): A : List[str] = [] for id in input_ids: A : Any = bert_tokenizer._convert_id_to_token(snake_case__ ) input_tokens.append(snake_case__ ) A : Dict = add_sub_symbol(snake_case__ , snake_case__ ) A : Union[str, Any] = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. for i, token in enumerate(snake_case__ ): if token[:2] == "##": A : Dict = token[2:] # save chinese tokens' pos if len(snake_case__ ) == 1 and _is_chinese_char(ord(snake_case__ ) ): ref_id.append(snake_case__ ) ref_ids.append(snake_case__ ) assert len(snake_case__ ) == len(snake_case__ ) return ref_ids def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f: A : Dict = f.readlines() A : Any = [line.strip() for line in data if len(snake_case__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' A : int = LTP(args.ltp ) # faster in GPU device A : Union[str, Any] = BertTokenizer.from_pretrained(args.bert ) A : Union[str, Any] = prepare_ref(snake_case__ , snake_case__ , snake_case__ ) with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f: A : Tuple = [json.dumps(snake_case__ ) + '''\n''' for ref in ref_ids] f.writelines(snake_case__ ) if __name__ == "__main__": lowercase : Any = argparse.ArgumentParser(description='prepare_chinese_ref') parser.add_argument( '--file_name', type=str, default='./resources/chinese-demo.txt', help='file need process, same as training data in lm', ) parser.add_argument( '--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path' ) parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer') parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res') lowercase : Union[str, Any] = parser.parse_args() main(args)
3
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) lowercase : Dict = { 'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'], 'processing_speech_to_text': ['Speech2TextProcessor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[Any] = ['Speech2TextTokenizer'] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[str] = ['Speech2TextFeatureExtractor'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Dict = [ 'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFSpeech2TextForConditionalGeneration', 'TFSpeech2TextModel', 'TFSpeech2TextPreTrainedModel', ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Any = [ 'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Speech2TextForConditionalGeneration', 'Speech2TextModel', 'Speech2TextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys lowercase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
3
1
'''simple docstring''' import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=100 , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=30 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , ) -> Union[str, Any]: """simple docstring""" A : Union[str, Any] = parent A : Optional[int] = 100 A : Optional[int] = batch_size A : Any = image_size A : Tuple = patch_size A : Union[str, Any] = num_channels A : str = is_training A : Dict = use_labels A : Any = hidden_size A : Dict = num_hidden_layers A : List[Any] = num_attention_heads A : Dict = intermediate_size A : Tuple = hidden_act A : Union[str, Any] = hidden_dropout_prob A : Any = attention_probs_dropout_prob A : List[Any] = type_sequence_label_size A : Dict = initializer_range A : int = scope A : Any = out_indices A : Any = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A : List[Any] = (image_size // patch_size) ** 2 A : Union[str, Any] = num_patches + 1 def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Tuple = None A : Any = None if self.use_labels: A : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) A : Dict = self.get_config() return config, pixel_values, labels, pixel_labels def __lowerCAmelCase ( self ) -> str: """simple docstring""" return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" A : Union[str, Any] = BeitModel(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : str = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : List[Any] = BeitForMaskedImageModeling(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Dict = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" A : int = self.type_sequence_label_size A : Dict = BeitForImageClassification(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : List[Any] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A : Dict = 1 A : Optional[Any] = BeitForImageClassification(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Dict = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" A : List[str] = self.num_labels A : int = BeitForSemanticSegmentation(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Optional[int] = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) A : List[Any] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Optional[int] = self.prepare_config_and_inputs() A, A, A, A : int = config_and_inputs A : Union[str, Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class A ( __snake_case , __snake_case , unittest.TestCase ): __magic_name__ = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) __magic_name__ = ( { '''feature-extraction''': BeitModel, '''image-classification''': BeitForImageClassification, '''image-segmentation''': BeitForSemanticSegmentation, } if is_torch_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : str = BeitModelTester(self ) A : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''BEiT does not use inputs_embeds''' ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" pass @require_torch_multi_gpu @unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" pass def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A, A : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Dict = model_class(SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A : int = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , nn.Linear ) ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A, A : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Dict = model_class(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Any = [*signature.parameters.keys()] A : Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" if not self.model_tester.is_training: return A, A : Dict = self.model_tester.prepare_config_and_inputs_for_common() A : Optional[Any] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(SCREAMING_SNAKE_CASE ), BeitForMaskedImageModeling]: continue A : Optional[int] = model_class(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.train() A : Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE ) A : List[str] = model(**SCREAMING_SNAKE_CASE ).loss loss.backward() def __lowerCAmelCase ( self ) -> str: """simple docstring""" A, A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return A : Union[str, Any] = False A : Tuple = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(SCREAMING_SNAKE_CASE ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue A : int = model_class(SCREAMING_SNAKE_CASE ) model.gradient_checkpointing_enable() model.to(SCREAMING_SNAKE_CASE ) model.train() A : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE ) A : Dict = model(**SCREAMING_SNAKE_CASE ).loss loss.backward() def __lowerCAmelCase ( self ) -> int: """simple docstring""" A, A : int = self.model_tester.prepare_config_and_inputs_for_common() A : List[Any] = _config_zero_init(SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: A : List[str] = model_class(config=SCREAMING_SNAKE_CASE ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , ) @slow def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : int = BeitModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) def lowerCAmelCase_ ( ): '''simple docstring''' A : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class A ( unittest.TestCase ): @cached_property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Tuple = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(SCREAMING_SNAKE_CASE ) A : int = self.default_image_processor A : Optional[int] = prepare_img() A : List[Any] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values.to(SCREAMING_SNAKE_CASE ) # prepare bool_masked_pos A : Optional[int] = torch.ones((1, 196) , dtype=torch.bool ).to(SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): A : int = model(pixel_values=SCREAMING_SNAKE_CASE , bool_masked_pos=SCREAMING_SNAKE_CASE ) A : Optional[int] = outputs.logits # verify the logits A : str = torch.Size((1, 196, 8192) ) self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE ) A : Union[str, Any] = torch.tensor( [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , SCREAMING_SNAKE_CASE , atol=1e-2 ) ) @slow def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : List[Any] = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(SCREAMING_SNAKE_CASE ) A : Tuple = self.default_image_processor A : Tuple = prepare_img() A : Any = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): A : Tuple = model(**SCREAMING_SNAKE_CASE ) A : Optional[int] = outputs.logits # verify the logits A : List[str] = torch.Size((1, 1000) ) self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE ) A : Optional[Any] = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) ) A : List[Any] = 281 self.assertEqual(logits.argmax(-1 ).item() , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : List[str] = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to( SCREAMING_SNAKE_CASE ) A : Union[str, Any] = self.default_image_processor A : List[Any] = prepare_img() A : Optional[Any] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): A : str = model(**SCREAMING_SNAKE_CASE ) A : Dict = outputs.logits # verify the logits A : Union[str, Any] = torch.Size((1, 21841) ) self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE ) A : List[Any] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) ) A : str = 2396 self.assertEqual(logits.argmax(-1 ).item() , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Tuple = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' ) A : Tuple = model.to(SCREAMING_SNAKE_CASE ) A : int = BeitImageProcessor(do_resize=SCREAMING_SNAKE_CASE , size=640 , do_center_crop=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) A : List[Any] = Image.open(ds[0]['''file'''] ) A : Optional[Any] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): A : List[Any] = model(**SCREAMING_SNAKE_CASE ) A : Union[str, Any] = outputs.logits # verify the logits A : Union[str, Any] = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE ) A : int = version.parse(PIL.__version__ ) < version.parse('''9.0.0''' ) if is_pillow_less_than_a: A : Any = torch.tensor( [ [[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]], [[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]], [[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]], ] , device=SCREAMING_SNAKE_CASE , ) else: A : int = torch.tensor( [ [[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]], [[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]], [[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]], ] , device=SCREAMING_SNAKE_CASE , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @slow def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : int = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' ) A : Dict = model.to(SCREAMING_SNAKE_CASE ) A : Optional[int] = BeitImageProcessor(do_resize=SCREAMING_SNAKE_CASE , size=640 , do_center_crop=SCREAMING_SNAKE_CASE ) A : Optional[int] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) A : Optional[int] = Image.open(ds[0]['''file'''] ) A : List[str] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): A : Optional[Any] = model(**SCREAMING_SNAKE_CASE ) A : Any = outputs.logits.detach().cpu() A : List[str] = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE , target_sizes=[(500, 300)] ) A : int = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE ) A : Dict = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE ) A : str = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE )
3
'''simple docstring''' import os import sys import unittest lowercase : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) lowercase : Any = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py') lowercase : Optional[int] = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py') class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Any = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : List[Any] = {'''BertModelTest''': '''BertModelTester'''} A : int = { '''BlipModelTest''': '''BlipModelTester''', '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''', '''BlipTextModelTest''': '''BlipTextModelTester''', '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''', '''BlipVQAModelTest''': '''BlipVQAModelTester''', '''BlipVisionModelTest''': '''BlipVisionModelTester''', } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : List[str] = { '''BertForMaskedLM''': ['''BertModelTest'''], '''BertForMultipleChoice''': ['''BertModelTest'''], '''BertForNextSentencePrediction''': ['''BertModelTest'''], '''BertForPreTraining''': ['''BertModelTest'''], '''BertForQuestionAnswering''': ['''BertModelTest'''], '''BertForSequenceClassification''': ['''BertModelTest'''], '''BertForTokenClassification''': ['''BertModelTest'''], '''BertLMHeadModel''': ['''BertModelTest'''], '''BertModel''': ['''BertModelTest'''], } A : Union[str, Any] = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''], '''BlipModel''': ['''BlipModelTest'''], '''BlipTextModel''': ['''BlipTextModelTest'''], '''BlipVisionModel''': ['''BlipVisionModelTest'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : int = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Dict = { '''BertForMaskedLM''': ['''BertModelTester'''], '''BertForMultipleChoice''': ['''BertModelTester'''], '''BertForNextSentencePrediction''': ['''BertModelTester'''], '''BertForPreTraining''': ['''BertModelTester'''], '''BertForQuestionAnswering''': ['''BertModelTester'''], '''BertForSequenceClassification''': ['''BertModelTester'''], '''BertForTokenClassification''': ['''BertModelTester'''], '''BertLMHeadModel''': ['''BertModelTester'''], '''BertModel''': ['''BertModelTester'''], } A : str = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''], '''BlipModel''': ['''BlipModelTester'''], '''BlipTextModel''': ['''BlipTextModelTester'''], '''BlipVisionModel''': ['''BlipVisionModelTester'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
3
1
'''simple docstring''' from argparse import ArgumentParser from .env import EnvironmentCommand def lowerCAmelCase_ ( ): '''simple docstring''' A : Optional[Any] = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' ) A : Union[str, Any] = parser.add_subparsers(help='''diffusers-cli command helpers''' ) # Register commands EnvironmentCommand.register_subcommand(snake_case__ ) # Let's go A : Union[str, Any] = parser.parse_args() if not hasattr(snake_case__ , '''func''' ): parser.print_help() exit(1 ) # Run A : Union[str, Any] = args.func(snake_case__ ) service.run() if __name__ == "__main__": main()
3
'''simple docstring''' from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class A ( __snake_case ): __magic_name__ = DistilBertTokenizer __magic_name__ = DistilBertTokenizerFast __magic_name__ = True @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : List[Any] = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' ) A : Dict = tokenizer.encode('''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE ) A : List[str] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE ) A : Optional[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE ) A : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
3
1
'''simple docstring''' import os def lowerCAmelCase_ ( ): '''simple docstring''' A : List[Any] = os.path.join(os.path.dirname(snake_case__ ) , '''num.txt''' ) with open(snake_case__ ) as file_hand: return str(sum(int(snake_case__ ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
3
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase : Optional[int] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''input_features''', '''attention_mask'''] def __init__( self , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=16000 , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" super().__init__(feature_size=SCREAMING_SNAKE_CASE , sampling_rate=SCREAMING_SNAKE_CASE , padding_value=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = num_mel_bins A : Tuple = do_ceptral_normalize A : Dict = normalize_means A : List[Any] = normalize_vars A : List[str] = True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : List[Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers A : Any = torch.from_numpy(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) A : Any = ta_kaldi.fbank(SCREAMING_SNAKE_CASE , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 0.0 , ) -> np.ndarray: """simple docstring""" if normalize_means: A : Dict = x[:input_length].mean(axis=0 ) A : Optional[Any] = np.subtract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if normalize_vars: A : str = x[:input_length].std(axis=0 ) A : int = np.divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if input_length < x.shape[0]: A : List[str] = padding_value # make sure array is in float32 A : Tuple = x.astype(np.floataa ) return x def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[np.ndarray]: """simple docstring""" A : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' F' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) A : List[Any] = isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}' ) A : Tuple = is_batched_numpy or ( isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ): A : Union[str, Any] = np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A : Any = [raw_speech] # extract fbank features A : List[str] = [self._extract_fbank_features(SCREAMING_SNAKE_CASE ) for waveform in raw_speech] # convert into correct format for padding A : str = BatchFeature({'''input_features''': features} ) A : Union[str, Any] = self.pad( SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) # make sure list is in array format A : List[str] = padded_inputs.get('''input_features''' ) if isinstance(input_features[0] , SCREAMING_SNAKE_CASE ): A : str = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features] A : Union[str, Any] = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: A : Dict = ( np.array(SCREAMING_SNAKE_CASE , dtype=np.intaa ) if self._get_padding_strategies(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE ) is not PaddingStrategy.DO_NOT_PAD else None ) A : List[Any] = self.normalize( padded_inputs['''input_features'''] , attention_mask=SCREAMING_SNAKE_CASE ) if return_tensors is not None: A : int = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE ) return padded_inputs
3
1
'''simple docstring''' import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : str = [] embed.append( ( F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight', F'stage{idx}.patch_embed.proj.weight', ) ) embed.append( ( F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias', F'stage{idx}.patch_embed.proj.bias', ) ) embed.append( ( F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight', F'stage{idx}.patch_embed.norm.weight', ) ) embed.append( ( F'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias', F'stage{idx}.patch_embed.norm.bias', ) ) return embed def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Optional[Any] = [] attention_weights.append( ( F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight', F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight', ) ) attention_weights.append( ( F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight', F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight', ) ) attention_weights.append( ( F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias', F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias', ) ) attention_weights.append( ( F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean', F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean', ) ) attention_weights.append( ( F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var', F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var', ) ) attention_weights.append( ( F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked', F'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked', ) ) attention_weights.append( ( F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight', F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight', ) ) attention_weights.append( ( F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight', F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight', ) ) attention_weights.append( ( F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias', F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias', ) ) attention_weights.append( ( F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean', F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean', ) ) attention_weights.append( ( F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var', F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var', ) ) attention_weights.append( ( F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked', F'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked', ) ) attention_weights.append( ( F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight', F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight', ) ) attention_weights.append( ( F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight', F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight', ) ) attention_weights.append( ( F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias', F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias', ) ) attention_weights.append( ( F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean', F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean', ) ) attention_weights.append( ( F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var', F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var', ) ) attention_weights.append( ( F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked', F'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked', ) ) attention_weights.append( ( F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight', F'stage{idx}.blocks.{cnt}.attn.proj_q.weight', ) ) attention_weights.append( ( F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias', F'stage{idx}.blocks.{cnt}.attn.proj_q.bias', ) ) attention_weights.append( ( F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight', F'stage{idx}.blocks.{cnt}.attn.proj_k.weight', ) ) attention_weights.append( ( F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias', F'stage{idx}.blocks.{cnt}.attn.proj_k.bias', ) ) attention_weights.append( ( F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight', F'stage{idx}.blocks.{cnt}.attn.proj_v.weight', ) ) attention_weights.append( ( F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias', F'stage{idx}.blocks.{cnt}.attn.proj_v.bias', ) ) attention_weights.append( ( F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight', F'stage{idx}.blocks.{cnt}.attn.proj.weight', ) ) attention_weights.append( ( F'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias', F'stage{idx}.blocks.{cnt}.attn.proj.bias', ) ) attention_weights.append( (F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc1.weight') ) attention_weights.append( (F'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc1.bias') ) attention_weights.append( (F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', F'stage{idx}.blocks.{cnt}.mlp.fc2.weight') ) attention_weights.append( (F'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', F'stage{idx}.blocks.{cnt}.mlp.fc2.bias') ) attention_weights.append( (F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', F'stage{idx}.blocks.{cnt}.norm1.weight') ) attention_weights.append( (F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', F'stage{idx}.blocks.{cnt}.norm1.bias') ) attention_weights.append( (F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', F'stage{idx}.blocks.{cnt}.norm2.weight') ) attention_weights.append( (F'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', F'stage{idx}.blocks.{cnt}.norm2.bias') ) return attention_weights def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Tuple = [] token.append((F'cvt.encoder.stages.{idx}.cls_token', '''stage2.cls_token''') ) return token def lowerCAmelCase_ ( ): '''simple docstring''' A : Optional[int] = [] head.append(('''layernorm.weight''', '''norm.weight''') ) head.append(('''layernorm.bias''', '''norm.bias''') ) head.append(('''classifier.weight''', '''head.weight''') ) head.append(('''classifier.bias''', '''head.bias''') ) return head def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : List[Any] = '''imagenet-1k-id2label.json''' A : str = 1000 A : Tuple = '''huggingface/label-files''' A : List[Any] = num_labels A : List[Any] = json.load(open(cached_download(hf_hub_url(snake_case__ , snake_case__ , repo_type='''dataset''' ) ) , '''r''' ) ) A : List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()} A : int = idalabel A : Any = {v: k for k, v in idalabel.items()} A : List[str] = CvtConfig(num_labels=snake_case__ , idalabel=snake_case__ , labelaid=snake_case__ ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13": A : Any = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21": A : Optional[Any] = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: A : Optional[Any] = [2, 2, 20] A : int = [3, 12, 16] A : str = [192, 768, 1024] A : Union[str, Any] = CvtForImageClassification(snake_case__ ) A : Optional[int] = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' ) A : List[str] = image_size A : Dict = torch.load(snake_case__ , map_location=torch.device('''cpu''' ) ) A : int = OrderedDict() A : Optional[Any] = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: A : Optional[int] = list_of_state_dict + cls_token(snake_case__ ) A : List[Any] = list_of_state_dict + embeddings(snake_case__ ) for cnt in range(config.depth[idx] ): A : Dict = list_of_state_dict + attention(snake_case__ , snake_case__ ) A : Optional[int] = list_of_state_dict + final() for gg in list_of_state_dict: print(snake_case__ ) for i in range(len(snake_case__ ) ): A : List[str] = original_weights[list_of_state_dict[i][1]] model.load_state_dict(snake_case__ ) model.save_pretrained(snake_case__ ) image_processor.save_pretrained(snake_case__ ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": lowercase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( '--cvt_model', default='cvt-w24', type=str, help='Name of the cvt model you\'d like to convert.', ) parser.add_argument( '--image_size', default=3_84, type=int, help='Input Image Size', ) parser.add_argument( '--cvt_file_name', default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth', type=str, help='Input Image Size', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) lowercase : Union[str, Any] = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
3
'''simple docstring''' import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 lowercase : str = get_tests_dir('fixtures/dummy_feature_extractor_config.json') lowercase : str = get_tests_dir('fixtures/vocab.json') lowercase : int = get_tests_dir('fixtures') class A ( unittest.TestCase ): __magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = 0 def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : List[Any] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : Union[str, Any] = WavaVecaConfig() A : List[str] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) # save in new folder model_config.save_pretrained(SCREAMING_SNAKE_CASE ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) ) A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : Dict = WavaVecaFeatureExtractor() A : List[str] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(SCREAMING_SNAKE_CASE ) # drop `processor_class` in tokenizer with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f: A : Dict = json.load(SCREAMING_SNAKE_CASE ) config_dict.pop('''processor_class''' ) with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE ) ) A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : List[Any] = WavaVecaFeatureExtractor() A : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(SCREAMING_SNAKE_CASE ) # drop `processor_class` in feature extractor with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f: A : str = json.load(SCREAMING_SNAKE_CASE ) config_dict.pop('''processor_class''' ) with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE ) ) A : str = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : str = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' ) model_config.save_pretrained(SCREAMING_SNAKE_CASE ) # copy relevant files copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) ) # create emtpy sample processor with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write('''{}''' ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" with self.assertRaises(SCREAMING_SNAKE_CASE ): A : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(SCREAMING_SNAKE_CASE ): A : Union[str, Any] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) A : List[str] = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) A : Tuple = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version A : List[str] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE ) A : int = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(SCREAMING_SNAKE_CASE ): AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Now that the config is registered, it can be used as any other config with the auto-API A : List[Any] = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: A : Tuple = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : Optional[int] = CustomTokenizer(SCREAMING_SNAKE_CASE ) A : Any = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" class A ( __snake_case ): __magic_name__ = False class A ( __snake_case ): __magic_name__ = False class A ( __snake_case ): __magic_name__ = '''AutoFeatureExtractor''' __magic_name__ = '''AutoTokenizer''' __magic_name__ = False try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # If remote code is not set, the default is to use local classes. A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. A : Optional[int] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. A : Tuple = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : int = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Optional[int] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' ) self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' ) @is_staging_test class A ( unittest.TestCase ): __magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def __lowerCAmelCase ( cls ) -> Dict: """simple docstring""" A : Optional[int] = TOKEN HfFolder.save_token(SCREAMING_SNAKE_CASE ) @classmethod def __lowerCAmelCase ( cls ) -> Any: """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' ) except HTTPError: pass def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Union[str, Any] = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(SCREAMING_SNAKE_CASE , '''test-processor''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token ) A : int = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Tuple = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(SCREAMING_SNAKE_CASE , '''test-processor-org''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token , organization='''valid_org''' , ) A : int = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() A : Any = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: A : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : str = CustomTokenizer(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(F'{USER}/test-dynamic-processor' , token=self._token ) A : List[str] = Repository(SCREAMING_SNAKE_CASE , clone_from=F'{USER}/test-dynamic-processor' , token=self._token ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { '''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''', '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) ) as f: A : Dict = json.load(SCREAMING_SNAKE_CASE ) self.assertDictEqual( tokenizer_config['''auto_map'''] , { '''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None], '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_feature_extraction.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_tokenization.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_processing.py''' ) ) ) repo.push_to_hub() A : Optional[int] = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' , trust_remote_code=SCREAMING_SNAKE_CASE ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
3
1
'''simple docstring''' import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Dict = mock.Mock() A : str = 500 A : List[str] = {} A : List[Any] = HTTPError A : str = {} # Download this model to make sure it's in the cache. A : Tuple = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('''requests.Session.request''' , return_value=SCREAMING_SNAKE_CASE ) as mock_head: A : Dict = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : str = mock.Mock() A : Optional[Any] = 500 A : str = {} A : Tuple = HTTPError A : str = {} # Download this model to make sure it's in the cache. A : str = GPTaTokenizerFast.from_pretrained('''gpt2''' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('''requests.Session.request''' , return_value=SCREAMING_SNAKE_CASE ) as mock_head: A : Union[str, Any] = GPTaTokenizerFast.from_pretrained('''gpt2''' ) # This check we did call the fake head request mock_head.assert_called() def __lowerCAmelCase ( self ) -> Any: """simple docstring""" try: A : Dict = tempfile.mktemp() with open(SCREAMING_SNAKE_CASE , '''wb''' ) as f: http_get('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' , SCREAMING_SNAKE_CASE ) A : List[str] = AlbertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) finally: os.remove(SCREAMING_SNAKE_CASE ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile('''tokenizer.json''' ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open('''tokenizer.json''' , '''wb''' ) as f: http_get('''https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json''' , SCREAMING_SNAKE_CASE ) A : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size , 1000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. finally: os.remove('''tokenizer.json''' ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : int = AlbertTokenizer.from_pretrained('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' ) @is_staging_test class A ( unittest.TestCase ): __magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def __lowerCAmelCase ( cls ) -> Optional[int]: """simple docstring""" A : List[str] = TOKEN HfFolder.save_token(SCREAMING_SNAKE_CASE ) @classmethod def __lowerCAmelCase ( cls ) -> List[str]: """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-tokenizer''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-tokenizer-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-tokenizer''' ) except HTTPError: pass def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: A : str = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : List[Any] = BertTokenizer(SCREAMING_SNAKE_CASE ) tokenizer.push_to_hub('''test-tokenizer''' , use_auth_token=self._token ) A : Optional[Any] = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id='''test-tokenizer''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(SCREAMING_SNAKE_CASE , repo_id='''test-tokenizer''' , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token ) A : int = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: A : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : Optional[int] = BertTokenizer(SCREAMING_SNAKE_CASE ) tokenizer.push_to_hub('''valid_org/test-tokenizer-org''' , use_auth_token=self._token ) A : List[Any] = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-tokenizer-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( SCREAMING_SNAKE_CASE , repo_id='''valid_org/test-tokenizer-org''' , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token ) A : str = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) @require_tokenizers def __lowerCAmelCase ( self ) -> str: """simple docstring""" CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: A : List[Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : Optional[int] = CustomTokenizer(SCREAMING_SNAKE_CASE ) # No fast custom tokenizer tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token ) A : Dict = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' , trust_remote_code=SCREAMING_SNAKE_CASE ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: A : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : str = BertTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE ) bert_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE ) A : Any = CustomTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE ) tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token ) A : Union[str, Any] = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' , trust_remote_code=SCREAMING_SNAKE_CASE ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizerFast''' ) A : str = AutoTokenizer.from_pretrained( F'{USER}/test-dynamic-tokenizer' , use_fast=SCREAMING_SNAKE_CASE , trust_remote_code=SCREAMING_SNAKE_CASE ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' ) class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Optional[Any] = Trie() trie.add('''Hello 友達''' ) self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} ) trie.add('''Hello''' ) trie.data self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {'''''': 1, ''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : List[str] = Trie() self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS] This is a extra_id_100'''] ) trie.add('''[CLS]''' ) trie.add('''extra_id_1''' ) trie.add('''extra_id_100''' ) self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS]''', ''' This is a ''', '''extra_id_100'''] ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Dict = Trie() trie.add('''A''' ) self.assertEqual(trie.split('''ABC''' ) , ['''A''', '''BC'''] ) self.assertEqual(trie.split('''BCA''' ) , ['''BC''', '''A'''] ) def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : List[Any] = Trie() trie.add('''TOKEN]''' ) trie.add('''[SPECIAL_TOKEN]''' ) self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : str = Trie() trie.add('''A''' ) trie.add('''P''' ) trie.add('''[SPECIAL_TOKEN]''' ) self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Any = Trie() trie.add('''AB''' ) trie.add('''B''' ) trie.add('''C''' ) self.assertEqual(trie.split('''ABC''' ) , ['''AB''', '''C'''] ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : List[str] = Trie() trie.add('''ABC''' ) trie.add('''B''' ) trie.add('''CD''' ) self.assertEqual(trie.split('''ABCD''' ) , ['''ABC''', '''D'''] ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Tuple = Trie() A : Optional[int] = trie.cut_text('''ABC''' , [0, 0, 2, 1, 2, 3] ) self.assertEqual(SCREAMING_SNAKE_CASE , ['''AB''', '''C'''] )
3
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: lowercase : Optional[Any] = None lowercase : Tuple = logging.get_logger(__name__) lowercase : Dict = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'} lowercase : Tuple = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, 'tokenizer_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json', }, } lowercase : List[str] = { 'google/rembert': 2_56, } lowercase : Dict = '▁' class A ( __snake_case ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = RemBertTokenizer def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[MASK]" , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" A : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token super().__init__( SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) A : List[Any] = do_lower_case A : str = remove_space A : int = keep_accents A : Union[str, Any] = vocab_file A : List[Any] = False if not self.vocab_file else True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : List[Any] = [self.sep_token_id] A : Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : Tuple = [self.sep_token_id] A : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error('''Vocabulary path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE ) ) return A : Any = os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
3
1
'''simple docstring''' lowercase : List[Any] = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' lowercase : Optional[int] = [{'type': 'code', 'content': INSTALL_CONTENT}] lowercase : List[str] = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
3
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowercase : Optional[Any] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''pixel_values'''] def __init__( self , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 1 / 255 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) A : str = size if size is not None else {'''shortest_edge''': 384} A : Tuple = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : str = do_resize A : List[Any] = size # Default value set here for backwards compatibility where the value in config is None A : List[Any] = crop_pct if crop_pct is not None else 224 / 256 A : Optional[int] = resample A : Union[str, Any] = do_rescale A : List[str] = rescale_factor A : Union[str, Any] = do_normalize A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : str = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) if "shortest_edge" not in size: raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' ) A : Any = size['''shortest_edge'''] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct A : Dict = int(shortest_edge / crop_pct ) A : str = get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : int = resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) else: # warping (no cropping) when evaluated at 384 or larger return resize( SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE , ) -> PIL.Image.Image: """simple docstring""" A : int = do_resize if do_resize is not None else self.do_resize A : Tuple = crop_pct if crop_pct is not None else self.crop_pct A : Optional[Any] = resample if resample is not None else self.resample A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale A : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor A : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean A : List[str] = image_std if image_std is not None else self.image_std A : Union[str, Any] = size if size is not None else self.size A : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : Any = make_list_of_images(SCREAMING_SNAKE_CASE ) if not valid_images(SCREAMING_SNAKE_CASE ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError('''crop_pct must be specified if size < 384.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. A : Optional[int] = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images] if do_resize: A : Any = [self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , crop_pct=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: A : str = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images] if do_normalize: A : Dict = [self.normalize(image=SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE ) for image in images] A : Any = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images] A : Optional[int] = {'''pixel_values''': images} return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
3
1
'''simple docstring''' from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class A ( __snake_case ): __magic_name__ = ['''vqvae'''] def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> str: """simple docstring""" super().__init__() self.register_modules(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , mel=SCREAMING_SNAKE_CASE , vqvae=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" return 50 if isinstance(self.scheduler , SCREAMING_SNAKE_CASE ) else 1000 @torch.no_grad() def __call__( self , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE=True , ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: """simple docstring""" A : Optional[Any] = steps or self.get_default_steps() self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) A : Optional[Any] = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: A : Union[str, Any] = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: A : int = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=SCREAMING_SNAKE_CASE , device=self.device , ) A : Union[str, Any] = noise A : int = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Tuple = self.mel.audio_slice_to_image(SCREAMING_SNAKE_CASE ) A : Optional[int] = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape( (input_image.height, input_image.width) ) A : Tuple = (input_image / 255) * 2 - 1 A : Union[str, Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: A : Optional[int] = self.vqvae.encode(torch.unsqueeze(SCREAMING_SNAKE_CASE , 0 ) ).latent_dist.sample( generator=SCREAMING_SNAKE_CASE )[0] A : Optional[Any] = self.vqvae.config.scaling_factor * input_images if start_step > 0: A : Union[str, Any] = self.scheduler.add_noise(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.scheduler.timesteps[start_step - 1] ) A : Optional[Any] = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) A : str = int(mask_start_secs * pixels_per_second ) A : int = int(mask_end_secs * pixels_per_second ) A : List[Any] = self.scheduler.add_noise(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , SCREAMING_SNAKE_CASE ): A : List[str] = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )['''sample'''] else: A : int = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )['''sample'''] if isinstance(self.scheduler , SCREAMING_SNAKE_CASE ): A : Any = self.scheduler.step( model_output=SCREAMING_SNAKE_CASE , timestep=SCREAMING_SNAKE_CASE , sample=SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , )['''prev_sample'''] else: A : Optional[int] = self.scheduler.step( model_output=SCREAMING_SNAKE_CASE , timestep=SCREAMING_SNAKE_CASE , sample=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , )['''prev_sample'''] if mask is not None: if mask_start > 0: A : str = mask[:, step, :, :mask_start] if mask_end > 0: A : List[str] = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance A : str = 1 / self.vqvae.config.scaling_factor * images A : Tuple = self.vqvae.decode(SCREAMING_SNAKE_CASE )['''sample'''] A : Tuple = (images / 2 + 0.5).clamp(0 , 1 ) A : Union[str, Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() A : Optional[Any] = (images * 255).round().astype('''uint8''' ) A : Optional[int] = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(SCREAMING_SNAKE_CASE , mode='''RGB''' ).convert('''L''' ) for _ in images) ) A : Any = [self.mel.image_to_audio(SCREAMING_SNAKE_CASE ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(SCREAMING_SNAKE_CASE )[:, np.newaxis, :] ) , **ImagePipelineOutput(SCREAMING_SNAKE_CASE ) ) @torch.no_grad() def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 50 ) -> np.ndarray: """simple docstring""" assert isinstance(self.scheduler , SCREAMING_SNAKE_CASE ) self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = np.array( [np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] ) A : int = (sample / 255) * 2 - 1 A : Union[str, Any] = torch.Tensor(SCREAMING_SNAKE_CASE ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): A : Union[str, Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps A : Union[str, Any] = self.scheduler.alphas_cumprod[t] A : Tuple = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) A : List[Any] = 1 - alpha_prod_t A : Dict = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )['''sample'''] A : List[str] = (1 - alpha_prod_t_prev) ** 0.5 * model_output A : List[Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) A : Union[str, Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> torch.Tensor: """simple docstring""" A : List[str] = acos(torch.dot(torch.flatten(SCREAMING_SNAKE_CASE ) , torch.flatten(SCREAMING_SNAKE_CASE ) ) / torch.norm(SCREAMING_SNAKE_CASE ) / torch.norm(SCREAMING_SNAKE_CASE ) ) return sin((1 - alpha) * theta ) * xa / sin(SCREAMING_SNAKE_CASE ) + sin(alpha * theta ) * xa / sin(SCREAMING_SNAKE_CASE )
3
'''simple docstring''' import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(SCREAMING_SNAKE_CASE ): A : int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[str] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> int: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: with self.subTest(SCREAMING_SNAKE_CASE ): A : Any = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Any = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: A : Optional[int] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = FlaxBertModel.from_pretrained(SCREAMING_SNAKE_CASE ) A : Optional[Any] = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() @slow def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: A : List[str] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE ) A : int = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''bert-base is not a local folder and is not a valid model identifier''' ): A : List[Any] = FlaxAutoModel.from_pretrained('''bert-base''' ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): A : Optional[int] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE , revision='''aaaaaa''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ): A : List[str] = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" with self.assertRaisesRegex(SCREAMING_SNAKE_CASE , '''Use `from_pt=True` to load this model''' ): A : Any = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
3
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase : Any = { 'configuration_bigbird_pegasus': [ 'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BigBirdPegasusConfig', 'BigBirdPegasusOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Optional[Any] = [ 'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST', 'BigBirdPegasusForCausalLM', 'BigBirdPegasusForConditionalGeneration', 'BigBirdPegasusForQuestionAnswering', 'BigBirdPegasusForSequenceClassification', 'BigBirdPegasusModel', 'BigBirdPegasusPreTrainedModel', ] if TYPE_CHECKING: from .configuration_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig, BigBirdPegasusOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, BigBirdPegasusPreTrainedModel, ) else: import sys lowercase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
3
'''simple docstring''' import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowercase : Union[str, Any] = { 'text_branch': 'text_model', 'audio_branch': 'audio_model.audio_encoder', 'attn': 'attention.self', 'self.proj': 'output.dense', 'attention.self_mask': 'attn_mask', 'mlp.fc1': 'intermediate.dense', 'mlp.fc2': 'output.dense', 'norm1': 'layernorm_before', 'norm2': 'layernorm_after', 'bn0': 'batch_norm', } lowercase : Tuple = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc') def lowerCAmelCase_ ( snake_case__ , snake_case__=False ): '''simple docstring''' A, A : Tuple = create_model( '''HTSAT-tiny''' , '''roberta''' , snake_case__ , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=snake_case__ , fusion_type='''aff_2d''' if enable_fusion else None , ) return model, model_cfg def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Dict = {} A : str = R'''.*sequential.(\d+).*''' A : Union[str, Any] = R'''.*_projection.(\d+).*''' for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: A : Any = key.replace(snake_case__ , snake_case__ ) if re.match(snake_case__ , snake_case__ ): # replace sequential layers with list A : Any = re.match(snake_case__ , snake_case__ ).group(1 ) A : List[str] = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(snake_case__ )//3}.linear.' ) elif re.match(snake_case__ , snake_case__ ): A : Union[str, Any] = int(re.match(snake_case__ , snake_case__ ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... A : str = 1 if projecton_layer == 0 else 2 A : Optional[Any] = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' ) if "audio" and "qkv" in key: # split qkv into query key and value A : int = value A : List[Any] = mixed_qkv.size(0 ) // 3 A : Union[str, Any] = mixed_qkv[:qkv_dim] A : Optional[int] = mixed_qkv[qkv_dim : qkv_dim * 2] A : Optional[int] = mixed_qkv[qkv_dim * 2 :] A : Tuple = query_layer A : Union[str, Any] = key_layer A : Optional[int] = value_layer else: A : Dict = value return model_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' A, A : int = init_clap(snake_case__ , enable_fusion=snake_case__ ) clap_model.eval() A : str = clap_model.state_dict() A : Union[str, Any] = rename_state_dict(snake_case__ ) A : Tuple = ClapConfig() A : str = enable_fusion A : str = ClapModel(snake_case__ ) # ignore the spectrogram embedding layer model.load_state_dict(snake_case__ , strict=snake_case__ ) model.save_pretrained(snake_case__ ) transformers_config.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase : List[str] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not') lowercase : Tuple = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
3
1
'''simple docstring''' import math from typing import Callable, List, Optional, Union import numpy as np import PIL import torch from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__=[] ): '''simple docstring''' A : Union[str, Any] = size[0] - overlap_pixels * 2 A : str = size[1] - overlap_pixels * 2 for letter in ["l", "r"]: if letter in remove_borders: size_x += overlap_pixels for letter in ["t", "b"]: if letter in remove_borders: size_y += overlap_pixels A : str = np.ones((size_y, size_x) , dtype=np.uinta ) * 255 A : Dict = np.pad(snake_case__ , mode='''linear_ramp''' , pad_width=snake_case__ , end_values=0 ) if "l" in remove_borders: A : Any = mask[:, overlap_pixels : mask.shape[1]] if "r" in remove_borders: A : Any = mask[:, 0 : mask.shape[1] - overlap_pixels] if "t" in remove_borders: A : Any = mask[overlap_pixels : mask.shape[0], :] if "b" in remove_borders: A : Union[str, Any] = mask[0 : mask.shape[0] - overlap_pixels, :] return mask def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' return max(snake_case__ , min(snake_case__ , snake_case__ ) ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' return ( clamp(rect[0] , min[0] , max[0] ), clamp(rect[1] , min[1] , max[1] ), clamp(rect[2] , min[0] , max[0] ), clamp(rect[3] , min[1] , max[1] ), ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : List[Any] = list(snake_case__ ) rect[0] -= overlap rect[1] -= overlap rect[2] += overlap rect[3] += overlap A : str = clamp_rect(snake_case__ , [0, 0] , [image_size[0], image_size[1]] ) return rect def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : int = Image.new('''RGB''' , (tile.size[0] + original_slice, tile.size[1]) ) result.paste( original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop( (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , ) result.paste(snake_case__ , (original_slice, 0) ) return result def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = (original_image_slice * 4, 0, tile.size[0], tile.size[1]) A : Union[str, Any] = tile.crop(snake_case__ ) return tile def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Union[str, Any] = n % d return n - divisor class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 350 , ) -> List[Any]: """simple docstring""" super().__init__( vae=SCREAMING_SNAKE_CASE , text_encoder=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , unet=SCREAMING_SNAKE_CASE , low_res_scheduler=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , max_noise_level=SCREAMING_SNAKE_CASE , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" torch.manual_seed(0 ) A : List[Any] = ( min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ), min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ), min(image.size[0] , (x + 1) * tile_size ), min(image.size[1] , (y + 1) * tile_size ), ) A : List[Any] = add_overlap_rect(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , image.size ) A : Dict = image.crop(SCREAMING_SNAKE_CASE ) A : Tuple = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0] A : Any = translated_slice_x - (original_image_slice / 2) A : Optional[Any] = max(0 , SCREAMING_SNAKE_CASE ) A : List[str] = squeeze_tile(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[str] = to_input.size A : Optional[int] = to_input.resize((tile_size, tile_size) , Image.BICUBIC ) A : str = super(SCREAMING_SNAKE_CASE , self ).__call__(image=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).images[0] A : str = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC ) A : int = unsqueeze_tile(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[Any] = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC ) A : Optional[int] = [] if x == 0: remove_borders.append('''l''' ) elif crop_rect[2] == image.size[0]: remove_borders.append('''r''' ) if y == 0: remove_borders.append('''t''' ) elif crop_rect[3] == image.size[1]: remove_borders.append('''b''' ) A : Optional[Any] = Image.fromarray( make_transparency_mask( (upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=SCREAMING_SNAKE_CASE ) , mode='''L''' , ) final_image.paste( SCREAMING_SNAKE_CASE , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , SCREAMING_SNAKE_CASE ) @torch.no_grad() def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 75 , SCREAMING_SNAKE_CASE = 9.0 , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 0.0 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 128 , SCREAMING_SNAKE_CASE = 32 , SCREAMING_SNAKE_CASE = 32 , ) -> Dict: """simple docstring""" A : str = Image.new('''RGB''' , (image.size[0] * 4, image.size[1] * 4) ) A : Tuple = math.ceil(image.size[0] / tile_size ) A : List[Any] = math.ceil(image.size[1] / tile_size ) A : Optional[int] = tcx * tcy A : int = 0 for y in range(SCREAMING_SNAKE_CASE ): for x in range(SCREAMING_SNAKE_CASE ): self._process_tile( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , prompt=SCREAMING_SNAKE_CASE , num_inference_steps=SCREAMING_SNAKE_CASE , guidance_scale=SCREAMING_SNAKE_CASE , noise_level=SCREAMING_SNAKE_CASE , negative_prompt=SCREAMING_SNAKE_CASE , num_images_per_prompt=SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , latents=SCREAMING_SNAKE_CASE , ) current_count += 1 if callback is not None: callback({'''progress''': current_count / total_tile_count, '''image''': final_image} ) return final_image def lowerCAmelCase_ ( ): '''simple docstring''' A : Dict = '''stabilityai/stable-diffusion-x4-upscaler''' A : int = StableDiffusionTiledUpscalePipeline.from_pretrained(snake_case__ , revision='''fp16''' , torch_dtype=torch.floataa ) A : Dict = pipe.to('''cuda''' ) A : Tuple = Image.open('''../../docs/source/imgs/diffusers_library.jpg''' ) def callback(snake_case__ ): print(F'progress: {obj["progress"]:.4f}' ) obj["image"].save('''diffusers_library_progress.jpg''' ) A : Optional[int] = pipe(image=snake_case__ , prompt='''Black font, white background, vector''' , noise_level=40 , callback=snake_case__ ) final_image.save('''diffusers_library.jpg''' ) if __name__ == "__main__": main()
3
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging lowercase : Dict = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise if not is_sharded: A : Union[str, Any] = os.path.abspath(snake_case__ ) logger.info(F'Loading PyTorch weights from {pt_path}' ) A : Any = torch.load(snake_case__ , map_location='''cpu''' ) logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' ) A : List[str] = convert_pytorch_state_dict_to_flax(snake_case__ , snake_case__ ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files A : Any = convert_pytorch_sharded_state_dict_to_flax(snake_case__ , snake_case__ ) return flax_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' def is_key_or_prefix_key_in_dict(snake_case__ ) -> bool: return len(set(snake_case__ ) & {key, (model_prefix,) + key} ) > 0 # layer norm A : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean A : Tuple = pt_tuple_key[:-1] + ('''mean''',) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var A : Dict = pt_tuple_key[:-1] + ('''var''',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # embedding A : Any = pt_tuple_key[:-1] + ('''embedding''',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # conv layer A : Optional[Any] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(snake_case__ ): A : List[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer A : Optional[int] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(snake_case__ ): A : str = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight A : Dict = pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias A : List[Any] = pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 A : Dict = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): A : List[Any] = pt_tuple_key[-2] + '''_g''' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): A : List[str] = pt_tuple_key[-2] + '''_v''' if name is not None: A : int = pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = {k: v.numpy() for k, v in pt_state_dict.items()} A : int = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: A : List[str] = flax_model.params['''params'''] else: A : Dict = flax_model.params A : List[Any] = flatten_dict(snake_case__ ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : List[str] = flatten_dict(flax_model.params['''batch_stats'''] ) random_flax_state_dict.update(snake_case__ ) A : int = {} A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : int = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : str = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : Union[str, Any] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Any = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Dict = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : Any = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: A : Tuple = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : List[str] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch # Load the index A : Union[str, Any] = {} for shard_file in shard_filenames: # load using msgpack utils A : List[str] = torch.load(snake_case__ ) A : int = {k: v.numpy() for k, v in pt_state_dict.items()} A : Tuple = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : Optional[int] = flax_model.params['''params'''] A : List[Any] = flatten_dict(snake_case__ ) random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) ) else: A : Dict = flax_model.params A : Tuple = flatten_dict(snake_case__ ) A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : List[str] = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : int = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : List[str] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Union[str, Any] = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Any = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : int = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue if "var" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = os.path.abspath(snake_case__ ) logger.info(F'Loading Flax weights from {flax_checkpoint_path}' ) # import correct flax class A : List[str] = getattr(snake_case__ , '''Flax''' + model.__class__.__name__ ) # load flax weight dict with open(snake_case__ , '''rb''' ) as state_f: try: A : int = from_bytes(snake_case__ , state_f.read() ) except UnpicklingError: raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights A : List[str] = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values() if any(snake_case__ ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) A : Optional[Any] = jax.tree_util.tree_map( lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ ) A : Union[str, Any] = flatten_dict(snake_case__ ) A : List[Any] = pt_model.state_dict() A : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) A : Tuple = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys A : int = [] A : Any = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): A : Union[str, Any] = flax_key_tuple[0] == pt_model.base_model_prefix A : int = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: A : List[str] = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: A : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case__ ) not in pt_model_dict: # conv layer A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) A : Optional[int] = jnp.transpose(snake_case__ , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ) not in pt_model_dict: # linear layer A : Tuple = flax_key_tuple[:-1] + ('''weight''',) A : Tuple = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_mean''',) elif "var" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_var''',) if "batch_stats" in flax_state: A : List[Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: A : Union[str, Any] = '''.'''.join(snake_case__ ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. A : int = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: A : Optional[int] = key.split('''.''' ) A : Dict = None if key_components[-3::2] == ["parametrizations", "original0"]: A : List[str] = key_components[-2] + '''_g''' elif key_components[-3::2] == ["parametrizations", "original1"]: A : List[Any] = key_components[-2] + '''_v''' if name is not None: A : str = key_components[:-3] + [name] A : Optional[Any] = '''.'''.join(snake_case__ ) A : Optional[Any] = key if flax_key in special_pt_names: A : Optional[Any] = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ' F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) else: # add weight to pytorch dict A : Dict = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor A : Dict = torch.from_numpy(snake_case__ ) # remove from missing keys missing_keys.remove(snake_case__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(snake_case__ ) pt_model.load_state_dict(snake_case__ ) # re-transform missing_keys to list A : List[Any] = list(snake_case__ ) if len(snake_case__ ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) else: logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' ) if len(snake_case__ ) > 0: logger.warning( F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' ''' use it for predictions and inference.''' ) else: logger.warning( F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n' '''If your task is similar to the task the model of the checkpoint was trained on, ''' F'you can already use {pt_model.__class__.__name__} for predictions without further training.' ) return pt_model
3
1
'''simple docstring''' import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin lowercase : Any = get_tests_dir('fixtures/test_sentencepiece_bpe.model') class A ( __snake_case , unittest.TestCase ): __magic_name__ = BartphoTokenizer __magic_name__ = False __magic_name__ = True def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" super().setUp() A : Dict = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] A : List[Any] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) ) A : str = {'''unk_token''': '''<unk>'''} A : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] ) with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp: for token in vocab_tokens: fp.write(F'{token} {vocab_tokens[token]}\n' ) A : Dict = BartphoTokenizer(SCREAMING_SNAKE_CASE , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" A : Tuple = '''This is a là test''' A : Tuple = '''This is a<unk><unk> test''' return input_text, output_text def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Any = BartphoTokenizer(SCREAMING_SNAKE_CASE , self.monolingual_vocab_file , **self.special_tokens_map ) A : Tuple = '''This is a là test''' A : Any = '''▁This ▁is ▁a ▁l à ▁t est'''.split() A : Optional[int] = tokenizer.tokenize(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[Any] = tokens + [tokenizer.unk_token] A : Tuple = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
3
'''simple docstring''' import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint lowercase : Optional[int] = { '169M': 12, '430M': 24, '1B5': 24, '3B': 32, '7B': 32, '14B': 40, } lowercase : Optional[Any] = { '169M': 7_68, '430M': 10_24, '1B5': 20_48, '3B': 25_60, '7B': 40_96, '14B': 51_20, } def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Optional[Any] = list(state_dict.keys() ) for name in state_dict_keys: A : str = state_dict.pop(snake_case__ ) # emb -> embedding if name.startswith('''emb.''' ): A : Optional[Any] = name.replace('''emb.''' , '''embeddings.''' ) # ln_0 -> pre_ln (only present at block 0) if name.startswith('''blocks.0.ln0''' ): A : Union[str, Any] = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' ) # att -> attention A : int = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , snake_case__ ) # ffn -> feed_forward A : List[Any] = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , snake_case__ ) # time_mix_k -> time_mix_key and reshape if name.endswith('''.time_mix_k''' ): A : List[str] = name.replace('''.time_mix_k''' , '''.time_mix_key''' ) # time_mix_v -> time_mix_value and reshape if name.endswith('''.time_mix_v''' ): A : Union[str, Any] = name.replace('''.time_mix_v''' , '''.time_mix_value''' ) # time_mix_r -> time_mix_key and reshape if name.endswith('''.time_mix_r''' ): A : Union[str, Any] = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' ) if name != "head.weight": A : List[Any] = '''rwkv.''' + name A : Dict = weight return state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=False , snake_case__=None ): '''simple docstring''' if tokenizer_file is None: print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' ) A : int = 5_0277 A : Optional[int] = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' ) else: A : str = PreTrainedTokenizerFast(tokenizer_file=snake_case__ ) A : Any = len(snake_case__ ) tokenizer.save_pretrained(snake_case__ ) # 2. Build the config A : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: A : List[str] = candidate break if size is None: raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' ) if size not in possible_sizes: raise ValueError(F'`size` should be one of {possible_sizes}, got {size}.' ) A : Any = RwkvConfig( vocab_size=snake_case__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(snake_case__ ) # 3. Download model file then convert state_dict A : Union[str, Any] = hf_hub_download(snake_case__ , snake_case__ ) A : Tuple = torch.load(snake_case__ , map_location='''cpu''' ) A : List[Any] = convert_state_dict(snake_case__ ) # 4. Split in shards and save A, A : List[str] = shard_checkpoint(snake_case__ ) for shard_file, shard in shards.items(): torch.save(snake_case__ , os.path.join(snake_case__ , snake_case__ ) ) if index is not None: A : Dict = os.path.join(snake_case__ , snake_case__ ) # Save the index as well with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: A : List[Any] = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '''\n''' f.write(snake_case__ ) # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( '''Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.''' ) A : List[Any] = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: A : Union[str, Any] = torch.load(os.path.join(snake_case__ , snake_case__ ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(snake_case__ , snake_case__ ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' ) A : int = AutoModelForCausalLM.from_pretrained(snake_case__ ) model.push_to_hub(snake_case__ , max_shard_size='''2GB''' ) tokenizer.push_to_hub(snake_case__ ) if __name__ == "__main__": lowercase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.' ) parser.add_argument( '--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.' ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='Where to save the converted model.' ) parser.add_argument( '--tokenizer_file', default=None, type=str, help='Path to the tokenizer file to use (if not provided, only the model is converted).', ) parser.add_argument( '--size', default=None, type=str, help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Push to the Hub the converted model.', ) parser.add_argument( '--model_name', default=None, type=str, help='Name of the pushed model on the Hub, including the username / organization.', ) lowercase : Union[str, Any] = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
3
1
'''simple docstring''' import requests from bsa import BeautifulSoup def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : str = BeautifulSoup(requests.get(snake_case__ , params=snake_case__ ).content , '''html.parser''' ) A : Dict = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} ) A : Optional[int] = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' ) return anchors[2].get_text() if __name__ == "__main__": lowercase : str = { 'title': ( 'Precisely geometry controlled microsupercapacitors for ultrahigh areal ' 'capacitance, volumetric capacitance, and energy density' ), 'journal': 'Chem. Mater.', 'volume': 30, 'pages': '3979-3990', 'year': 20_18, 'hl': 'en', } print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
3
'''simple docstring''' from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING lowercase : str = logging.get_logger(__name__) @add_end_docstrings(__snake_case ) class A ( __snake_case ): def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) self.check_model_type(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A, A : Dict = {}, {} if padding is not None: A : List[str] = padding if truncation is not None: A : Dict = truncation if top_k is not None: A : Optional[Any] = top_k return preprocess_params, {}, postprocess_params def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE , (Image.Image, str) ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : int = {'''image''': image, '''question''': question} else: A : Any = image A : Any = super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) return results def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" A : Union[str, Any] = load_image(inputs['''image'''] ) A : Optional[Any] = self.tokenizer( inputs['''question'''] , return_tensors=self.framework , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE ) A : Dict = self.image_processor(images=SCREAMING_SNAKE_CASE , return_tensors=self.framework ) model_inputs.update(SCREAMING_SNAKE_CASE ) return model_inputs def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : List[Any] = self.model(**SCREAMING_SNAKE_CASE ) return model_outputs def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=5 ) -> int: """simple docstring""" if top_k > self.model.config.num_labels: A : Dict = self.model.config.num_labels if self.framework == "pt": A : Optional[int] = model_outputs.logits.sigmoid()[0] A, A : int = probs.topk(SCREAMING_SNAKE_CASE ) else: raise ValueError(F'Unsupported framework: {self.framework}' ) A : int = scores.tolist() A : List[str] = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )]
3
1
'''simple docstring''' import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : int = 1.5 A : Dict = int(factor * num_class_images ) A : Tuple = ClipClient( url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=snake_case__ , aesthetic_weight=0.1 ) os.makedirs(F'{class_data_dir}/images' , exist_ok=snake_case__ ) if len(list(Path(F'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images: return while True: A : Tuple = client.query(text=snake_case__ ) if len(snake_case__ ) >= factor * num_class_images or num_images > 1E4: break else: A : Dict = int(factor * num_images ) A : Optional[int] = ClipClient( url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=snake_case__ , aesthetic_weight=0.1 , ) A : Dict = 0 A : Optional[int] = 0 A : Tuple = tqdm(desc='''downloading real regularization images''' , total=snake_case__ ) with open(F'{class_data_dir}/caption.txt' , '''w''' ) as fa, open(F'{class_data_dir}/urls.txt' , '''w''' ) as fa, open( F'{class_data_dir}/images.txt' , '''w''' ) as fa: while total < num_class_images: A : Optional[Any] = class_images[count] count += 1 try: A : Dict = requests.get(images['''url'''] ) if img.status_code == 200: A : Tuple = Image.open(BytesIO(img.content ) ) with open(F'{class_data_dir}/images/{total}.jpg' , '''wb''' ) as f: f.write(img.content ) fa.write(images['''caption'''] + '''\n''' ) fa.write(images['''url'''] + '''\n''' ) fa.write(F'{class_data_dir}/images/{total}.jpg' + '''\n''' ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def lowerCAmelCase_ ( ): '''simple docstring''' A : int = argparse.ArgumentParser('''''' , add_help=snake_case__ ) parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=snake_case__ , type=snake_case__ ) parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=snake_case__ , type=snake_case__ ) parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=200 , type=snake_case__ ) return parser.parse_args() if __name__ == "__main__": lowercase : Optional[int] = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
3
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase : Union[str, Any] = logging.get_logger(__name__) lowercase : str = { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json', 'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json', 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json', 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json', 'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json', 'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json', 'cl-tohoku/bert-base-japanese-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json' ), 'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json', # See all BERT models at https://huggingface.co/models?filter=bert } class A ( __snake_case ): __magic_name__ = '''bert''' def __init__( self , SCREAMING_SNAKE_CASE=30522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Optional[int]: """simple docstring""" super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = vocab_size A : Optional[Any] = hidden_size A : List[Any] = num_hidden_layers A : List[str] = num_attention_heads A : Dict = hidden_act A : Optional[Any] = intermediate_size A : List[Any] = hidden_dropout_prob A : List[Any] = attention_probs_dropout_prob A : Optional[Any] = max_position_embeddings A : List[str] = type_vocab_size A : Dict = initializer_range A : str = layer_norm_eps A : int = position_embedding_type A : Dict = use_cache A : str = classifier_dropout class A ( __snake_case ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": A : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: A : Optional[int] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
3
1
'''simple docstring''' from arguments import InitializationArguments from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser # Configuration lowercase : Optional[int] = HfArgumentParser(InitializationArguments) lowercase : Union[str, Any] = parser.parse_args() # Load codeparrot tokenizer trained for Python code tokenization lowercase : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_name) # Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks lowercase : Optional[int] = { 'vocab_size': len(tokenizer), 'scale_attn_by_inverse_layer_idx': True, 'reorder_and_upcast_attn': True, } # Load model config (GPT-2 large in this case) lowercase : Dict = AutoConfig.from_pretrained(args.config_name, **config_kwargs) # Initialize new model with config lowercase : str = AutoModelForCausalLM.from_config(config) # Save model to the hub model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
3
'''simple docstring''' import requests from bsa import BeautifulSoup def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : str = BeautifulSoup(requests.get(snake_case__ , params=snake_case__ ).content , '''html.parser''' ) A : Dict = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} ) A : Optional[int] = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' ) return anchors[2].get_text() if __name__ == "__main__": lowercase : str = { 'title': ( 'Precisely geometry controlled microsupercapacitors for ultrahigh areal ' 'capacitance, volumetric capacitance, and energy density' ), 'journal': 'Chem. Mater.', 'volume': 30, 'pages': '3979-3990', 'year': 20_18, 'hl': 'en', } print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
3
1
'''simple docstring''' import logging import os from .state import PartialState class A ( logging.LoggerAdapter ): @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" A : List[str] = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" if PartialState._shared_state == {}: raise RuntimeError( '''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' ) A : Dict = kwargs.pop('''main_process_only''' , SCREAMING_SNAKE_CASE ) A : Optional[Any] = kwargs.pop('''in_order''' , SCREAMING_SNAKE_CASE ) if self.isEnabledFor(SCREAMING_SNAKE_CASE ): if self._should_log(SCREAMING_SNAKE_CASE ): A, A : Optional[Any] = self.process(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.logger.log(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) elif in_order: A : str = PartialState() for i in range(state.num_processes ): if i == state.process_index: A, A : List[Any] = self.process(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.logger.log(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) state.wait_for_everyone() def lowerCAmelCase_ ( snake_case__ , snake_case__ = None ): '''simple docstring''' if log_level is None: A : str = os.environ.get('''ACCELERATE_LOG_LEVEL''' , snake_case__ ) A : Dict = logging.getLogger(snake_case__ ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(snake_case__ , {} )
3
'''simple docstring''' class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Any = None A : Optional[Any] = None A : Tuple = graph self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Dict = len(SCREAMING_SNAKE_CASE ) A : Optional[Any] = None def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" if sources is int: A : Dict = [sources] if sinks is int: A : str = [sinks] if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0: return A : Optional[int] = sources[0] A : Union[str, Any] = sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1: A : Optional[int] = 0 for i in sources: max_input_flow += sum(self.graph[i] ) A : Dict = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: A : Dict = max_input_flow A : Tuple = 0 A : Tuple = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: A : Optional[Any] = max_input_flow A : Optional[Any] = size - 1 def __lowerCAmelCase ( self ) -> Any: """simple docstring""" if self.maximum_flow_algorithm is None: raise Exception('''You need to set maximum flow algorithm before.''' ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : List[Any] = algorithm(self ) class A : def __init__( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Union[str, Any] = flow_network A : Optional[Any] = flow_network.verticesCount A : Tuple = flow_network.sourceIndex A : Dict = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that A : str = flow_network.graph A : Optional[Any] = False def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" if not self.executed: self._algorithm() A : Optional[int] = True def __lowerCAmelCase ( self ) -> Any: """simple docstring""" pass class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE ) # use this to save your result A : List[str] = -1 def __lowerCAmelCase ( self ) -> str: """simple docstring""" if not self.executed: raise Exception('''You should execute algorithm before using its result!''' ) return self.maximum_flow class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE ) A : Optional[Any] = [[0] * self.verticies_count for i in range(self.verticies_count )] A : Union[str, Any] = [0] * self.verticies_count A : List[Any] = [0] * self.verticies_count def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Tuple = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule A : Optional[Any] = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list A : Union[str, Any] = 0 while i < len(SCREAMING_SNAKE_CASE ): A : str = vertices_list[i] A : List[str] = self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) ) A : int = 0 else: i += 1 A : Optional[Any] = sum(self.preflow[self.source_index] ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.relabel(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Dict = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : Dict = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): A : Dict = self.heights[to_index] if min_height is not None: A : Dict = min_height + 1 if __name__ == "__main__": lowercase : Optional[int] = [0] lowercase : List[Any] = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] lowercase : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network lowercase : List[str] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate lowercase : List[str] = flow_network.find_maximum_flow() print(f'''maximum flow is {maximum_flow}''')
3
1
'''simple docstring''' lowercase : Optional[Any] = 'Alexander Joslin' import operator as op from .stack import Stack def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Optional[Any] = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub} A : Stack[int] = Stack() A : Stack[str] = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(snake_case__ ) ) elif i in operators: # RULE 2 operator_stack.push(snake_case__ ) elif i == ")": # RULE 4 A : List[str] = operator_stack.peek() operator_stack.pop() A : Any = operand_stack.peek() operand_stack.pop() A : int = operand_stack.peek() operand_stack.pop() A : int = operators[opr](snake_case__ , snake_case__ ) operand_stack.push(snake_case__ ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": lowercase : Any = '(5 + ((4 * 2) * (2 + 3)))' # answer = 45 print(f'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
3
'''simple docstring''' def lowerCAmelCase_ ( snake_case__ = 10 ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ) or n < 0: raise ValueError('''Invalid input''' ) A : List[str] = 10**n A : Tuple = 2_8433 * (pow(2 , 783_0457 , snake_case__ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(f'''{solution(10) = }''')
3
1
'''simple docstring''' from math import factorial, radians def lowerCAmelCase_ ( snake_case__ , snake_case__ = 18 , snake_case__ = 10 ): '''simple docstring''' A : List[str] = angle_in_degrees - ((angle_in_degrees // 3_60.0) * 3_60.0) # Converting from degrees to radians A : Optional[int] = radians(snake_case__ ) A : List[str] = angle_in_radians A : Union[str, Any] = 3 A : int = -1 for _ in range(snake_case__ ): result += (b * (angle_in_radians**a)) / factorial(snake_case__ ) A : Any = -b # One positive term and the next will be negative and so on... a += 2 # Increased by 2 for every term. return round(snake_case__ , snake_case__ ) if __name__ == "__main__": __import__('doctest').testmod()
3
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging lowercase : List[str] = logging.get_logger(__name__) lowercase : str = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class A ( __snake_case ): __magic_name__ = '''gpt_neo''' __magic_name__ = ['''past_key_values'''] __magic_name__ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , SCREAMING_SNAKE_CASE=50257 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=24 , SCREAMING_SNAKE_CASE=[[["global", "local"], 12]] , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=50256 , SCREAMING_SNAKE_CASE=50256 , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" A : Union[str, Any] = vocab_size A : Optional[Any] = max_position_embeddings A : Dict = hidden_size A : Optional[Any] = num_layers A : Tuple = num_heads A : int = intermediate_size A : Optional[Any] = window_size A : List[Any] = activation_function A : Union[str, Any] = resid_dropout A : Any = embed_dropout A : List[Any] = attention_dropout A : str = classifier_dropout A : List[Any] = layer_norm_epsilon A : str = initializer_range A : List[str] = use_cache A : Optional[int] = bos_token_id A : List[Any] = eos_token_id A : int = attention_types A : int = self.expand_attention_types_params(SCREAMING_SNAKE_CASE ) if len(self.attention_layers ) != self.num_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.attention_layers)` == `config.num_layers` ''' F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, ' F'`config.num_layers = {self.num_layers}`. ' '''`config.attention_layers` is prepared using `config.attention_types`. ''' '''Please verify the value of `config.attention_types` argument.''' ) super().__init__(bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : List[str] = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' import torch A : Tuple = input.size() A : Union[str, Any] = len(snake_case__ ) A : List[str] = shape[dimension] A : Union[str, Any] = torch.arange(0 , snake_case__ , snake_case__ ) A : List[str] = torch.div(sizedim - size , snake_case__ , rounding_mode='''floor''' ) + 1 A : Optional[int] = torch.arange(snake_case__ ) + low_indices[:min_length][:, None] A : str = [slice(snake_case__ )] * rank A : List[Any] = indices A : Union[str, Any] = input[s] A : List[str] = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch A : List[str] = torch.arange(1 , snake_case__ ) A : Optional[int] = torch.remainder(snake_case__ , snake_case__ ) A : Optional[int] = remainders == 0 A : Optional[Any] = candidates[divisor_indices] A : Optional[int] = torch.max(snake_case__ ) return largest_divisor, torch.div(snake_case__ , snake_case__ , rounding_mode='''floor''' ) class A ( __snake_case ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" A : Tuple = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE , direction='''inputs''' ) A : Optional[Any] = {0: '''batch''', 1: '''past_sequence + sequence'''} else: A : Dict = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return self._config.num_heads def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]: """simple docstring""" A : List[str] = super(SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs( SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE ) # We need to order the input in the way they appears in the forward() A : Any = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch A, A : Dict = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values A : str = seqlen + 2 A : List[Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) A : Any = [ (torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers ) ] A : str = common_inputs['''attention_mask'''] if self.use_past: A : Optional[int] = ordered_inputs['''attention_mask'''].dtype A : List[str] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )] , dim=1 ) return ordered_inputs @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return 13
3
1
'''simple docstring''' import json import os import unittest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A ( __snake_case , unittest.TestCase ): __magic_name__ = CLIPTokenizer __magic_name__ = CLIPTokenizerFast __magic_name__ = True __magic_name__ = {} __magic_name__ = False def __lowerCAmelCase ( self ) -> Any: """simple docstring""" super().setUp() # fmt: off A : List[Any] = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on A : Tuple = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) ) A : Any = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>'''] A : List[Any] = {'''unk_token''': '''<unk>'''} A : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) A : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" kwargs.update(self.special_tokens_map ) return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" kwargs.update(self.special_tokens_map ) return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : str = '''lower newer''' A : int = '''lower newer''' return input_text, output_text def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Tuple = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) A : Optional[Any] = '''lower newer''' A : int = ['''lo''', '''w''', '''er</w>''', '''n''', '''e''', '''w''', '''er</w>'''] A : List[str] = tokenizer.tokenize(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[Any] = tokens + [tokenizer.unk_token] A : Any = [10, 2, 16, 9, 3, 2, 16, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) @require_ftfy def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): A : int = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : List[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : List[str] = '''A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.''' A : Any = tokenizer_s.tokenize(SCREAMING_SNAKE_CASE ) A : List[Any] = tokenizer_r.tokenize(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Test that the tokenization is identical on an example containing a character (Latin Small Letter A # with Tilde) encoded in 2 different ways A : List[Any] = '''xa\u0303y''' + ''' ''' + '''x\xe3y''' A : Union[str, Any] = tokenizer_s.tokenize(SCREAMING_SNAKE_CASE ) A : Dict = tokenizer_r.tokenize(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Test that the tokenization is identical on unicode of space type A : Tuple = [ '''\u0009''', # (horizontal tab, '\t') '''\u000B''', # (vertical tab) '''\u000C''', # (form feed) '''\u0020''', # (space, ' ') '''\u200E''', # (left-to-right mark):w '''\u200F''', # (right-to-left mark) ] for unicode_seq in spaces_unicodes: A : List[str] = tokenizer_s.tokenize(SCREAMING_SNAKE_CASE ) A : List[Any] = tokenizer_r.tokenize(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Test that the tokenization is identical on unicode of line break type A : int = [ '''\u000A''', # (line feed, '\n') '''\r\n''', # (carriage return and line feed, '\r\n') '''\u000D''', # (carriage return, '\r') '''\r''', # (carriage return, '\r') '''\u000D''', # (carriage return, '\r') '''\u2028''', # (line separator) '''\u2029''', # (paragraph separator) # "\u0085", # (next line) ] # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a # space (and thus into an empty list). for unicode_seq in line_break_unicodes: A : List[str] = tokenizer_s.tokenize(SCREAMING_SNAKE_CASE ) A : Tuple = tokenizer_r.tokenize(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): A : int = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name` A : List[Any] = F'{text_of_1_token} {text_of_1_token}' A : Optional[Any] = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE , ) A : Optional[int] = tokenizer_r(SCREAMING_SNAKE_CASE , return_offsets_mapping=SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE ) + 1, len(SCREAMING_SNAKE_CASE ) + 1 + len(SCREAMING_SNAKE_CASE )) , ) A : Any = F' {text}' A : Dict = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE , ) A : int = tokenizer_r(SCREAMING_SNAKE_CASE , return_offsets_mapping=SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(SCREAMING_SNAKE_CASE )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE ) + 1, 1 + len(SCREAMING_SNAKE_CASE ) + 1 + len(SCREAMING_SNAKE_CASE )) , ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" with self.assertRaises(SCREAMING_SNAKE_CASE ) as context: self.rust_tokenizer_class.from_pretrained('''robot-test/old-clip-tokenizer''' ) self.assertTrue( context.exception.args[0].startswith( '''The `backend_tokenizer` provided does not match the expected format.''' ) ) @require_ftfy def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" super().test_tokenization_python_rust_equals() def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" pass
3
'''simple docstring''' import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Union[str, Any] = [] A : Union[str, Any] = [] for i in range(self.num_layers ): A : Any = self.in_channels if i == 0 else self.out_channels A : Optional[Any] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Optional[int] = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnets A : Union[str, Any] = attentions if self.add_downsample: A : int = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Union[str, Any]: """simple docstring""" A : Optional[Any] = () for resnet, attn in zip(self.resnets , self.attentions ): A : int = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Dict = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[Any] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Optional[Any] = [] for i in range(self.num_layers ): A : Optional[Any] = self.in_channels if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets if self.add_downsample: A : Dict = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]: """simple docstring""" A : str = () for resnet in self.resnets: A : Optional[int] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[int] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[Any] = [] A : Optional[int] = [] for i in range(self.num_layers ): A : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : Dict = self.prev_output_channel if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : int = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets A : Optional[Any] = attentions if self.add_upsample: A : Optional[int] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[int]: """simple docstring""" for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states A : List[str] = res_hidden_states_tuple[-1] A : int = res_hidden_states_tuple[:-1] A : List[str] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Tuple = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : Dict = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : int = [] for i in range(self.num_layers ): A : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : List[str] = self.prev_output_channel if i == 0 else self.out_channels A : str = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[Any] = resnets if self.add_upsample: A : Optional[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Tuple: """simple docstring""" for resnet in self.resnets: # pop res hidden states A : Optional[int] = res_hidden_states_tuple[-1] A : Optional[Any] = res_hidden_states_tuple[:-1] A : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Optional[Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : List[str] = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : str = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] A : List[Any] = [] for _ in range(self.num_layers ): A : int = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[str] = resnets A : List[str] = attentions def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Dict: """simple docstring""" A : Optional[Any] = self.resnets[0](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): A : Optional[int] = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) return hidden_states
3
1
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : List[Any] = logging.get_logger(__name__) lowercase : List[Any] = { 'microsoft/unispeech-large-1500h-cv': ( 'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class A ( __snake_case ): __magic_name__ = '''unispeech''' def __init__( self , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE="group" , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 512, 512, 512) , SCREAMING_SNAKE_CASE=(5, 2, 2, 2, 2, 2, 2) , SCREAMING_SNAKE_CASE=(10, 3, 3, 3, 3, 2, 2) , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=0.05 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=320 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=100 , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE="mean" , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.5 , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE , pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE ) A : Any = hidden_size A : Union[str, Any] = feat_extract_norm A : int = feat_extract_activation A : Tuple = list(SCREAMING_SNAKE_CASE ) A : Dict = list(SCREAMING_SNAKE_CASE ) A : Any = list(SCREAMING_SNAKE_CASE ) A : Any = conv_bias A : Dict = num_conv_pos_embeddings A : str = num_conv_pos_embedding_groups A : Optional[Any] = len(self.conv_dim ) A : str = num_hidden_layers A : Optional[Any] = intermediate_size A : Any = hidden_act A : List[str] = num_attention_heads A : int = hidden_dropout A : Optional[Any] = attention_dropout A : Union[str, Any] = activation_dropout A : str = feat_proj_dropout A : Tuple = final_dropout A : str = layerdrop A : Optional[int] = layer_norm_eps A : Optional[Any] = initializer_range A : Dict = num_ctc_classes A : int = vocab_size A : Dict = do_stable_layer_norm A : Dict = use_weighted_layer_sum A : Any = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,' F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A : Dict = apply_spec_augment A : Tuple = mask_time_prob A : Dict = mask_time_length A : List[Any] = mask_time_min_masks A : List[Any] = mask_feature_prob A : List[str] = mask_feature_length A : Any = mask_feature_min_masks # parameters for pretraining with codevector quantized representations A : Dict = num_codevectors_per_group A : str = num_codevector_groups A : List[Any] = contrastive_logits_temperature A : Optional[Any] = feat_quantizer_dropout A : Union[str, Any] = num_negatives A : str = codevector_dim A : Tuple = proj_codevector_dim A : Optional[int] = diversity_loss_weight # ctc loss A : List[Any] = ctc_loss_reduction A : int = ctc_zero_infinity # pretraining loss A : Union[str, Any] = replace_prob @property def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
3
'''simple docstring''' import os def lowerCAmelCase_ ( ): '''simple docstring''' A : List[Any] = os.path.join(os.path.dirname(snake_case__ ) , '''num.txt''' ) with open(snake_case__ ) as file_hand: return str(sum(int(snake_case__ ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
3
1
'''simple docstring''' import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() lowercase : Optional[Any] = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Union[str, Any] = RobertaPreLayerNormConfig.from_pretrained( snake_case__ , architectures=['''RobertaPreLayerNormForMaskedLM'''] ) # convert state_dict A : int = torch.load(hf_hub_download(repo_id=snake_case__ , filename='''pytorch_model.bin''' ) ) A : str = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta' if tensor_key.startswith('''roberta.''' ): A : str = '''roberta_prelayernorm.''' + tensor_key[len('''roberta.''' ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith('''.self.LayerNorm.weight''' ) or tensor_key.endswith('''.self.LayerNorm.bias''' ): continue A : Tuple = tensor_value A : Optional[int] = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=snake_case__ , config=snake_case__ , state_dict=snake_case__ ) model.save_pretrained(snake_case__ ) # convert tokenizer A : Any = AutoTokenizer.from_pretrained(snake_case__ ) tokenizer.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint-repo', default=None, type=str, required=True, help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) lowercase : Tuple = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
3
'''simple docstring''' import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] ) @pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , '''IN_MEMORY_MAX_SIZE''' , snake_case__ ) A : Dict = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: A : Dict = dataset_size < in_memory_max_size else: A : Tuple = False A : int = is_small_dataset(snake_case__ ) assert result == expected
3
1
'''simple docstring''' import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py lowercase : Dict = 'src/diffusers' # Matches is_xxx_available() lowercase : Dict = re.compile(R'is\_([a-z_]*)_available\(\)') # Matches from xxx import bla lowercase : Dict = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') lowercase : Tuple = '\n{0} = None\n' lowercase : Dict = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n' lowercase : Optional[int] = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n' def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Tuple = _re_backend.findall(snake_case__ ) if len(snake_case__ ) == 0: return None return "_and_".join(snake_case__ ) def lowerCAmelCase_ ( ): '''simple docstring''' with open(os.path.join(snake_case__ , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: A : Union[str, Any] = f.readlines() # Get to the point we do the actual imports for type checking A : Tuple = 0 A : int = {} # Go through the end of the file while line_index < len(snake_case__ ): # If the line contains is_backend_available, we grab all objects associated with the `else` block A : List[Any] = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith('''else:''' ): line_index += 1 line_index += 1 A : Optional[int] = [] # Until we unindent, add backend objects to the list while line_index < len(snake_case__ ) and len(lines[line_index] ) > 1: A : Union[str, Any] = lines[line_index] A : Tuple = _re_single_line_import.search(snake_case__ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(snake_case__ ) > 0: A : Optional[Any] = objects else: line_index += 1 return backend_specific_objects def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' if name.isupper(): return DUMMY_CONSTANT.format(snake_case__ ) elif name.islower(): return DUMMY_FUNCTION.format(snake_case__ , snake_case__ ) else: return DUMMY_CLASS.format(snake_case__ , snake_case__ ) def lowerCAmelCase_ ( snake_case__=None ): '''simple docstring''' if backend_specific_objects is None: A : Optional[int] = read_init() # For special correspondence backend to module name as used in the function requires_modulename A : Optional[int] = {} for backend, objects in backend_specific_objects.items(): A : Optional[int] = '''[''' + ''', '''.join(F'"{b}"' for b in backend.split('''_and_''' ) ) + ''']''' A : Optional[int] = '''# This file is autogenerated by the command `make fix-copies`, do not edit.\n''' dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(snake_case__ , snake_case__ ) for o in objects] ) A : Dict = dummy_file return dummy_files def lowerCAmelCase_ ( snake_case__=False ): '''simple docstring''' A : int = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py A : Optional[int] = {'''torch''': '''pt'''} # Locate actual dummy modules and read their content. A : Any = os.path.join(snake_case__ , '''utils''' ) A : Optional[int] = { backend: os.path.join(snake_case__ , F'dummy_{short_names.get(snake_case__ , snake_case__ )}_objects.py' ) for backend in dummy_files.keys() } A : Optional[Any] = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(snake_case__ ): with open(snake_case__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: A : Any = f.read() else: A : Optional[Any] = '''''' for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( F'Updating diffusers.utils.dummy_{short_names.get(snake_case__ , snake_case__ )}_objects.py as the main ' '''__init__ has new objects.''' ) with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(dummy_files[backend] ) else: raise ValueError( '''The main __init__ has objects that are not present in ''' F'diffusers.utils.dummy_{short_names.get(snake_case__ , snake_case__ )}_objects.py. Run `make fix-copies` ' '''to fix this.''' ) if __name__ == "__main__": lowercase : Optional[Any] = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') lowercase : Dict = parser.parse_args() check_dummies(args.fix_and_overwrite)
3
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class A ( __snake_case ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
3
1
'''simple docstring''' import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class A ( __snake_case , __snake_case , unittest.TestCase ): __magic_name__ = IFPipeline __magic_name__ = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} __magic_name__ = TEXT_TO_IMAGE_BATCH_PARAMS __magic_name__ = PipelineTesterMixin.required_optional_params - {'''latents'''} def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" return self._get_dummy_components() def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0 ) -> Optional[int]: """simple docstring""" if str(SCREAMING_SNAKE_CASE ).startswith('''mps''' ): A : str = torch.manual_seed(SCREAMING_SNAKE_CASE ) else: A : List[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE ) A : Any = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1e-1 ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" self._test_save_load_local() def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1e-2 , ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Optional[int] = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa ) A : str = IFSuperResolutionPipeline.from_pretrained( '''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to('''cuda''' ) A, A : Optional[Any] = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() A : List[Any] = None A : List[Any] = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img A : Optional[Any] = IFImgaImgPipeline(**pipe_a.components ) A : Union[str, Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting A : str = IFInpaintingPipeline(**pipe_a.components ) A : Any = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _start_torch_memory_measurement() A : Any = torch.Generator(device='''cpu''' ).manual_seed(0 ) A : Dict = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE , negative_prompt_embeds=SCREAMING_SNAKE_CASE , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE , output_type='''np''' , ) A : Any = output.images[0] assert image.shape == (64, 64, 3) A : Union[str, Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 A : Union[str, Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # pipeline 2 _start_torch_memory_measurement() A : str = torch.Generator(device='''cpu''' ).manual_seed(0 ) A : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE ) A : List[str] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE , negative_prompt_embeds=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''np''' , ) A : List[str] = output.images[0] assert image.shape == (256, 256, 3) A : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 A : int = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" _start_torch_memory_measurement() A : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE ) A : Optional[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 ) A : Any = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE , negative_prompt_embeds=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE , output_type='''np''' , ) A : List[str] = output.images[0] assert image.shape == (64, 64, 3) A : Any = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 A : Union[str, Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # pipeline 2 _start_torch_memory_measurement() A : Any = torch.Generator(device='''cpu''' ).manual_seed(0 ) A : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE ) A : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE ) A : Tuple = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE , negative_prompt_embeds=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , original_image=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''np''' , ) A : Any = output.images[0] assert image.shape == (256, 256, 3) A : Union[str, Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 A : List[str] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" _start_torch_memory_measurement() A : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE ) A : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE ) A : Optional[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 ) A : List[Any] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE , negative_prompt_embeds=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , mask_image=SCREAMING_SNAKE_CASE , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE , output_type='''np''' , ) A : Any = output.images[0] assert image.shape == (64, 64, 3) A : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 A : int = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # pipeline 2 _start_torch_memory_measurement() A : int = torch.Generator(device='''cpu''' ).manual_seed(0 ) A : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE ) A : Any = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE ) A : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE ) A : Dict = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE , negative_prompt_embeds=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , mask_image=SCREAMING_SNAKE_CASE , original_image=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''np''' , ) A : Optional[int] = output.images[0] assert image.shape == (256, 256, 3) A : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 A : Any = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def lowerCAmelCase_ ( ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
3
'''simple docstring''' from scipy.stats import pearsonr import datasets lowercase : Optional[int] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n' lowercase : Optional[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n' lowercase : str = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: """simple docstring""" if return_pvalue: A : Union[str, Any] = pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )}
3
1
'''simple docstring''' import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters lowercase : Optional[int] = (7_20, 12_80) # Height, Width lowercase : List[str] = (0.4, 0.6) # if height or width lower than this scale, drop it. lowercase : List[Any] = 1 / 1_00 lowercase : Optional[Any] = '' lowercase : Dict = '' lowercase : str = '' lowercase : str = 2_50 def lowerCAmelCase_ ( ): '''simple docstring''' A, A : Dict = get_dataset(snake_case__ , snake_case__ ) for index in range(snake_case__ ): A : Tuple = random.sample(range(len(snake_case__ ) ) , 4 ) A, A, A : List[str] = update_image_and_anno( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , filter_scale=snake_case__ , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' A : Union[str, Any] = random_chars(32 ) A : Any = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0] A : Union[str, Any] = F'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}' cva.imwrite(F'{file_root}.jpg' , snake_case__ , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(F'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}' ) A : Dict = [] for anno in new_annos: A : Optional[int] = anno[3] - anno[1] A : int = anno[4] - anno[2] A : str = anno[1] + width / 2 A : List[str] = anno[2] + height / 2 A : Dict = F'{anno[0]} {x_center} {y_center} {width} {height}' annos_list.append(snake_case__ ) with open(F'{file_root}.txt' , '''w''' ) as outfile: outfile.write('''\n'''.join(line for line in annos_list ) ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = [] A : Dict = [] for label_file in glob.glob(os.path.join(snake_case__ , '''*.txt''' ) ): A : List[str] = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0] with open(snake_case__ ) as in_file: A : Optional[Any] = in_file.readlines() A : Optional[Any] = os.path.join(snake_case__ , F'{label_name}.jpg' ) A : Tuple = [] for obj_list in obj_lists: A : List[Any] = obj_list.rstrip('''\n''' ).split(''' ''' ) A : str = float(obj[1] ) - float(obj[3] ) / 2 A : List[Any] = float(obj[2] ) - float(obj[4] ) / 2 A : int = float(obj[1] ) + float(obj[3] ) / 2 A : Optional[Any] = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(snake_case__ ) labels.append(snake_case__ ) return img_paths, labels def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 0.0 , ): '''simple docstring''' A : List[str] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) A : List[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) A : Optional[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) A : Optional[Any] = int(scale_x * output_size[1] ) A : Union[str, Any] = int(scale_y * output_size[0] ) A : Union[str, Any] = [] A : Union[str, Any] = [] for i, index in enumerate(snake_case__ ): A : List[Any] = all_img_list[index] path_list.append(snake_case__ ) A : Tuple = all_annos[index] A : Optional[Any] = cva.imread(snake_case__ ) if i == 0: # top-left A : Optional[Any] = cva.resize(snake_case__ , (divid_point_x, divid_point_y) ) A : Optional[Any] = img for bbox in img_annos: A : Union[str, Any] = bbox[1] * scale_x A : Optional[Any] = bbox[2] * scale_y A : int = bbox[3] * scale_x A : Optional[int] = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right A : Any = cva.resize(snake_case__ , (output_size[1] - divid_point_x, divid_point_y) ) A : Optional[Any] = img for bbox in img_annos: A : Optional[Any] = scale_x + bbox[1] * (1 - scale_x) A : Tuple = bbox[2] * scale_y A : Any = scale_x + bbox[3] * (1 - scale_x) A : List[str] = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left A : Optional[Any] = cva.resize(snake_case__ , (divid_point_x, output_size[0] - divid_point_y) ) A : Any = img for bbox in img_annos: A : Optional[Any] = bbox[1] * scale_x A : List[str] = scale_y + bbox[2] * (1 - scale_y) A : Optional[Any] = bbox[3] * scale_x A : Any = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right A : List[str] = cva.resize( snake_case__ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) A : str = img for bbox in img_annos: A : Dict = scale_x + bbox[1] * (1 - scale_x) A : int = scale_y + bbox[2] * (1 - scale_y) A : List[Any] = scale_x + bbox[3] * (1 - scale_x) A : Any = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: A : List[str] = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' assert number_char > 1, "The number of character should greater than 1" A : int = ascii_lowercase + digits return "".join(random.choice(snake_case__ ) for _ in range(snake_case__ ) ) if __name__ == "__main__": main() print('DONE ✅')
3
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) lowercase : Dict = { 'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'], 'processing_speech_to_text': ['Speech2TextProcessor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[Any] = ['Speech2TextTokenizer'] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[str] = ['Speech2TextFeatureExtractor'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Dict = [ 'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFSpeech2TextForConditionalGeneration', 'TFSpeech2TextModel', 'TFSpeech2TextPreTrainedModel', ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Any = [ 'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Speech2TextForConditionalGeneration', 'Speech2TextModel', 'Speech2TextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys lowercase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
3
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase : Union[str, Any] = { 'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'], 'processing_git': ['GitProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : str = [ 'GIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GitForCausalLM', 'GitModel', 'GitPreTrainedModel', 'GitVisionModel', ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys lowercase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
3
'''simple docstring''' import os import sys import unittest lowercase : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) lowercase : Any = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py') lowercase : Optional[int] = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py') class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Any = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : List[Any] = {'''BertModelTest''': '''BertModelTester'''} A : int = { '''BlipModelTest''': '''BlipModelTester''', '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''', '''BlipTextModelTest''': '''BlipTextModelTester''', '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''', '''BlipVQAModelTest''': '''BlipVQAModelTester''', '''BlipVisionModelTest''': '''BlipVisionModelTester''', } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : List[str] = { '''BertForMaskedLM''': ['''BertModelTest'''], '''BertForMultipleChoice''': ['''BertModelTest'''], '''BertForNextSentencePrediction''': ['''BertModelTest'''], '''BertForPreTraining''': ['''BertModelTest'''], '''BertForQuestionAnswering''': ['''BertModelTest'''], '''BertForSequenceClassification''': ['''BertModelTest'''], '''BertForTokenClassification''': ['''BertModelTest'''], '''BertLMHeadModel''': ['''BertModelTest'''], '''BertModel''': ['''BertModelTest'''], } A : Union[str, Any] = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''], '''BlipModel''': ['''BlipModelTest'''], '''BlipTextModel''': ['''BlipTextModelTest'''], '''BlipVisionModel''': ['''BlipVisionModelTest'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : int = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Dict = { '''BertForMaskedLM''': ['''BertModelTester'''], '''BertForMultipleChoice''': ['''BertModelTester'''], '''BertForNextSentencePrediction''': ['''BertModelTester'''], '''BertForPreTraining''': ['''BertModelTester'''], '''BertForQuestionAnswering''': ['''BertModelTester'''], '''BertForSequenceClassification''': ['''BertModelTester'''], '''BertForTokenClassification''': ['''BertModelTester'''], '''BertLMHeadModel''': ['''BertModelTester'''], '''BertModel''': ['''BertModelTester'''], } A : str = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''], '''BlipModel''': ['''BlipModelTester'''], '''BlipTextModel''': ['''BlipTextModelTester'''], '''BlipVisionModel''': ['''BlipVisionModelTester'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
3
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase : Dict = logging.get_logger(__name__) lowercase : Dict = { 'facebook/data2vec-vision-base-ft': ( 'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json' ), } class A ( __snake_case ): __magic_name__ = '''data2vec-vision''' def __init__( self , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=224 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=[3, 5, 7, 11] , SCREAMING_SNAKE_CASE=[1, 2, 3, 6] , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=0.4 , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=255 , **SCREAMING_SNAKE_CASE , ) -> Optional[int]: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) A : Tuple = hidden_size A : Optional[int] = num_hidden_layers A : Union[str, Any] = num_attention_heads A : Dict = intermediate_size A : Any = hidden_act A : Optional[int] = hidden_dropout_prob A : Optional[Any] = attention_probs_dropout_prob A : List[Any] = initializer_range A : Any = layer_norm_eps A : Any = image_size A : Union[str, Any] = patch_size A : Optional[Any] = num_channels A : Union[str, Any] = use_mask_token A : int = use_absolute_position_embeddings A : List[str] = use_relative_position_bias A : List[Any] = use_shared_relative_position_bias A : str = layer_scale_init_value A : int = drop_path_rate A : Tuple = use_mean_pooling # decode head attributes (semantic segmentation) A : Tuple = out_indices A : Optional[int] = pool_scales # auxiliary head attributes (semantic segmentation) A : int = use_auxiliary_head A : List[Any] = auxiliary_loss_weight A : str = auxiliary_channels A : Union[str, Any] = auxiliary_num_convs A : List[Any] = auxiliary_concat_input A : Dict = semantic_loss_ignore_index class A ( __snake_case ): __magic_name__ = version.parse('''1.11''' ) @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def __lowerCAmelCase ( self ) -> float: """simple docstring""" return 1e-4
3
'''simple docstring''' from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class A ( __snake_case ): __magic_name__ = DistilBertTokenizer __magic_name__ = DistilBertTokenizerFast __magic_name__ = True @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : List[Any] = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' ) A : Dict = tokenizer.encode('''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE ) A : List[str] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE ) A : Optional[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE ) A : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
3
1
'''simple docstring''' def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' return credit_card_number.startswith(('''34''', '''35''', '''37''', '''4''', '''5''', '''6''') ) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Union[str, Any] = credit_card_number A : Optional[Any] = 0 A : Tuple = len(snake_case__ ) - 2 for i in range(snake_case__ , -1 , -2 ): # double the value of every second digit A : Dict = int(cc_number[i] ) digit *= 2 # If doubling of a number results in a two digit number # i.e greater than 9(e.g., 6 × 2 = 12), # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6), # to get a single digit number. if digit > 9: digit %= 10 digit += 1 A : Union[str, Any] = cc_number[:i] + str(snake_case__ ) + cc_number[i + 1 :] total += digit # Sum up the remaining digits for i in range(len(snake_case__ ) - 1 , -1 , -2 ): total += int(cc_number[i] ) return total % 10 == 0 def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Optional[int] = F'{credit_card_number} is an invalid credit card number because' if not credit_card_number.isdigit(): print(F'{error_message} it has nonnumerical characters.' ) return False if not 13 <= len(snake_case__ ) <= 16: print(F'{error_message} of its length.' ) return False if not validate_initial_digits(snake_case__ ): print(F'{error_message} of its first two digits.' ) return False if not luhn_validation(snake_case__ ): print(F'{error_message} it fails the Luhn check.' ) return False print(F'{credit_card_number} is a valid credit card number.' ) return True if __name__ == "__main__": import doctest doctest.testmod() validate_credit_card_number('4111111111111111') validate_credit_card_number('32323')
3
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase : Optional[int] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''input_features''', '''attention_mask'''] def __init__( self , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=16000 , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" super().__init__(feature_size=SCREAMING_SNAKE_CASE , sampling_rate=SCREAMING_SNAKE_CASE , padding_value=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = num_mel_bins A : Tuple = do_ceptral_normalize A : Dict = normalize_means A : List[Any] = normalize_vars A : List[str] = True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : List[Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers A : Any = torch.from_numpy(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) A : Any = ta_kaldi.fbank(SCREAMING_SNAKE_CASE , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 0.0 , ) -> np.ndarray: """simple docstring""" if normalize_means: A : Dict = x[:input_length].mean(axis=0 ) A : Optional[Any] = np.subtract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if normalize_vars: A : str = x[:input_length].std(axis=0 ) A : int = np.divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if input_length < x.shape[0]: A : List[str] = padding_value # make sure array is in float32 A : Tuple = x.astype(np.floataa ) return x def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[np.ndarray]: """simple docstring""" A : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' F' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) A : List[Any] = isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}' ) A : Tuple = is_batched_numpy or ( isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ): A : Union[str, Any] = np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A : Any = [raw_speech] # extract fbank features A : List[str] = [self._extract_fbank_features(SCREAMING_SNAKE_CASE ) for waveform in raw_speech] # convert into correct format for padding A : str = BatchFeature({'''input_features''': features} ) A : Union[str, Any] = self.pad( SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) # make sure list is in array format A : List[str] = padded_inputs.get('''input_features''' ) if isinstance(input_features[0] , SCREAMING_SNAKE_CASE ): A : str = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features] A : Union[str, Any] = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: A : Dict = ( np.array(SCREAMING_SNAKE_CASE , dtype=np.intaa ) if self._get_padding_strategies(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE ) is not PaddingStrategy.DO_NOT_PAD else None ) A : List[Any] = self.normalize( padded_inputs['''input_features'''] , attention_mask=SCREAMING_SNAKE_CASE ) if return_tensors is not None: A : int = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE ) return padded_inputs
3
1
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowercase : Dict = logging.get_logger(__name__) lowercase : Any = {'vocab_file': 'sentencepiece.bpe.model'} lowercase : Dict = { 'vocab_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model', } } lowercase : Optional[Any] = { 'camembert-base': 5_12, } lowercase : Optional[int] = '▁' class A ( __snake_case ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ['''input_ids''', '''attention_mask'''] def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="<s>" , SCREAMING_SNAKE_CASE="</s>" , SCREAMING_SNAKE_CASE="</s>" , SCREAMING_SNAKE_CASE="<s>" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="<mask>" , SCREAMING_SNAKE_CASE=["<s>NOTUSED", "</s>NOTUSED"] , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None: """simple docstring""" A : Tuple = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token A : str = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , additional_special_tokens=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , ) A : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(SCREAMING_SNAKE_CASE ) ) A : str = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> A : Optional[Any] = {'''<s>NOTUSED''': 0, '''<pad>''': 1, '''</s>NOTUSED''': 2, '''<unk>''': 3} A : List[str] = len(self.fairseq_tokens_to_ids ) A : List[str] = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) A : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A : Optional[int] = [self.cls_token_id] A : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : Optional[Any] = [self.sep_token_id] A : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : str = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(SCREAMING_SNAKE_CASE ) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" A : List[Any] = [] A : Union[str, Any] = '''''' A : Union[str, Any] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE ) + token A : Dict = True A : Optional[Any] = [] else: current_sub_tokens.append(SCREAMING_SNAKE_CASE ) A : Optional[int] = False out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE ) return out_string.strip() def __getstate__( self ) -> List[Any]: """simple docstring""" A : Dict = self.__dict__.copy() A : Optional[Any] = None return state def __setstate__( self , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : Dict = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): A : Dict = {} A : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A : List[str] = os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE , '''wb''' ) as fi: A : Tuple = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
3
'''simple docstring''' import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 lowercase : str = get_tests_dir('fixtures/dummy_feature_extractor_config.json') lowercase : str = get_tests_dir('fixtures/vocab.json') lowercase : int = get_tests_dir('fixtures') class A ( unittest.TestCase ): __magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = 0 def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : List[Any] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : Union[str, Any] = WavaVecaConfig() A : List[str] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) # save in new folder model_config.save_pretrained(SCREAMING_SNAKE_CASE ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) ) A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : Dict = WavaVecaFeatureExtractor() A : List[str] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(SCREAMING_SNAKE_CASE ) # drop `processor_class` in tokenizer with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f: A : Dict = json.load(SCREAMING_SNAKE_CASE ) config_dict.pop('''processor_class''' ) with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE ) ) A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : List[Any] = WavaVecaFeatureExtractor() A : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(SCREAMING_SNAKE_CASE ) # drop `processor_class` in feature extractor with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f: A : str = json.load(SCREAMING_SNAKE_CASE ) config_dict.pop('''processor_class''' ) with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE ) ) A : str = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : str = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' ) model_config.save_pretrained(SCREAMING_SNAKE_CASE ) # copy relevant files copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) ) # create emtpy sample processor with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write('''{}''' ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" with self.assertRaises(SCREAMING_SNAKE_CASE ): A : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(SCREAMING_SNAKE_CASE ): A : Union[str, Any] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) A : List[str] = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) A : Tuple = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version A : List[str] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE ) A : int = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(SCREAMING_SNAKE_CASE ): AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Now that the config is registered, it can be used as any other config with the auto-API A : List[Any] = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: A : Tuple = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : Optional[int] = CustomTokenizer(SCREAMING_SNAKE_CASE ) A : Any = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" class A ( __snake_case ): __magic_name__ = False class A ( __snake_case ): __magic_name__ = False class A ( __snake_case ): __magic_name__ = '''AutoFeatureExtractor''' __magic_name__ = '''AutoTokenizer''' __magic_name__ = False try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # If remote code is not set, the default is to use local classes. A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. A : Optional[int] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. A : Tuple = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : int = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Optional[int] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' ) self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' ) @is_staging_test class A ( unittest.TestCase ): __magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def __lowerCAmelCase ( cls ) -> Dict: """simple docstring""" A : Optional[int] = TOKEN HfFolder.save_token(SCREAMING_SNAKE_CASE ) @classmethod def __lowerCAmelCase ( cls ) -> Any: """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' ) except HTTPError: pass def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Union[str, Any] = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(SCREAMING_SNAKE_CASE , '''test-processor''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token ) A : int = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Tuple = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(SCREAMING_SNAKE_CASE , '''test-processor-org''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token , organization='''valid_org''' , ) A : int = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() A : Any = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: A : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : str = CustomTokenizer(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(F'{USER}/test-dynamic-processor' , token=self._token ) A : List[str] = Repository(SCREAMING_SNAKE_CASE , clone_from=F'{USER}/test-dynamic-processor' , token=self._token ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { '''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''', '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) ) as f: A : Dict = json.load(SCREAMING_SNAKE_CASE ) self.assertDictEqual( tokenizer_config['''auto_map'''] , { '''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None], '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_feature_extraction.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_tokenization.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_processing.py''' ) ) ) repo.push_to_hub() A : Optional[int] = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' , trust_remote_code=SCREAMING_SNAKE_CASE ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
3
1
'''simple docstring''' class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> Any: """simple docstring""" A : Tuple = data A : Optional[Any] = previous A : Union[str, Any] = next_node def __str__( self ) -> str: """simple docstring""" return F'{self.data}' def __lowerCAmelCase ( self ) -> int: """simple docstring""" return self.data def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" return self.next def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" return self.previous class A : def __init__( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" A : List[str] = head def __iter__( self ) -> Any: """simple docstring""" return self def __lowerCAmelCase ( self ) -> int: """simple docstring""" if not self.current: raise StopIteration else: A : List[str] = self.current.get_data() A : Union[str, Any] = self.current.get_next() return value class A : def __init__( self ) -> Optional[Any]: """simple docstring""" A : int = None # First node in list A : str = None # Last node in list def __str__( self ) -> int: """simple docstring""" A : int = self.head A : Optional[int] = [] while current is not None: nodes.append(current.get_data() ) A : List[str] = current.get_next() return " ".join(str(SCREAMING_SNAKE_CASE ) for node in nodes ) def __contains__( self , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" A : str = self.head while current: if current.get_data() == value: return True A : Optional[int] = current.get_next() return False def __iter__( self ) -> int: """simple docstring""" return LinkedListIterator(self.head ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" if self.head: return self.head.get_data() return None def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" if self.tail: return self.tail.get_data() return None def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" if self.head is None: A : Any = node A : List[Any] = node else: self.insert_before_node(self.head , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" if self.head is None: self.set_head(SCREAMING_SNAKE_CASE ) else: self.insert_after_node(self.tail , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" A : List[Any] = Node(SCREAMING_SNAKE_CASE ) if self.head is None: self.set_head(SCREAMING_SNAKE_CASE ) else: self.set_tail(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" A : Tuple = node A : int = node.previous if node.get_previous() is None: A : int = node_to_insert else: A : Tuple = node_to_insert A : Optional[int] = node_to_insert def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" A : Tuple = node A : int = node.next if node.get_next() is None: A : Optional[int] = node_to_insert else: A : Tuple = node_to_insert A : List[str] = node_to_insert def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" A : int = 1 A : int = Node(SCREAMING_SNAKE_CASE ) A : List[str] = self.head while node: if current_position == position: self.insert_before_node(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return current_position += 1 A : Any = node.next self.insert_after_node(self.tail , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Node: """simple docstring""" A : str = self.head while node: if node.get_data() == item: return node A : int = node.get_next() raise Exception('''Node not found''' ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" if (node := self.get_node(SCREAMING_SNAKE_CASE )) is not None: if node == self.head: A : Optional[Any] = self.head.get_next() if node == self.tail: A : Union[str, Any] = self.tail.get_previous() self.remove_node_pointers(SCREAMING_SNAKE_CASE ) @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE ) -> None: """simple docstring""" if node.get_next(): A : Union[str, Any] = node.previous if node.get_previous(): A : Optional[int] = node.next A : int = None A : Optional[int] = None def __lowerCAmelCase ( self ) -> int: """simple docstring""" return self.head is None def lowerCAmelCase_ ( ): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
3
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: lowercase : Optional[Any] = None lowercase : Tuple = logging.get_logger(__name__) lowercase : Dict = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'} lowercase : Tuple = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, 'tokenizer_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json', }, } lowercase : List[str] = { 'google/rembert': 2_56, } lowercase : Dict = '▁' class A ( __snake_case ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = RemBertTokenizer def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[MASK]" , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" A : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token super().__init__( SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) A : List[Any] = do_lower_case A : str = remove_space A : int = keep_accents A : Union[str, Any] = vocab_file A : List[Any] = False if not self.vocab_file else True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : List[Any] = [self.sep_token_id] A : Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : Tuple = [self.sep_token_id] A : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error('''Vocabulary path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE ) ) return A : Any = os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
3
1
'''simple docstring''' lowercase : str = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): A : Union[str, Any] = F'a bytes-like object is required, not \'{data.__class__.__name__}\'' raise TypeError(snake_case__ ) A : Union[str, Any] = ''''''.join(bin(snake_case__ )[2:].zfill(8 ) for byte in data ) A : List[str] = len(snake_case__ ) % 6 != 0 if padding_needed: # The padding that will be added later A : Union[str, Any] = B'''=''' * ((6 - len(snake_case__ ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(snake_case__ ) % 6) else: A : Optional[Any] = B'''''' # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(snake_case__ ) , 6 ) ).encode() + padding ) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ) and not isinstance(snake_case__ , snake_case__ ): A : int = ( '''argument should be a bytes-like object or ASCII string, ''' F'not \'{encoded_data.__class__.__name__}\'' ) raise TypeError(snake_case__ ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(snake_case__ , snake_case__ ): try: A : List[str] = encoded_data.decode('''utf-8''' ) except UnicodeDecodeError: raise ValueError('''base64 encoded data should only contain ASCII characters''' ) A : Any = encoded_data.count('''=''' ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(snake_case__ ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one A : int = encoded_data[:-padding] A : List[str] = ''''''.join( bin(B64_CHARSET.index(snake_case__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: A : int = ''''''.join( bin(B64_CHARSET.index(snake_case__ ) )[2:].zfill(6 ) for char in encoded_data ) A : List[str] = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(snake_case__ ) , 8 ) ] return bytes(snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod()
3
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowercase : Optional[Any] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''pixel_values'''] def __init__( self , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 1 / 255 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) A : str = size if size is not None else {'''shortest_edge''': 384} A : Tuple = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : str = do_resize A : List[Any] = size # Default value set here for backwards compatibility where the value in config is None A : List[Any] = crop_pct if crop_pct is not None else 224 / 256 A : Optional[int] = resample A : Union[str, Any] = do_rescale A : List[str] = rescale_factor A : Union[str, Any] = do_normalize A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : str = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) if "shortest_edge" not in size: raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' ) A : Any = size['''shortest_edge'''] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct A : Dict = int(shortest_edge / crop_pct ) A : str = get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : int = resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) else: # warping (no cropping) when evaluated at 384 or larger return resize( SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE , ) -> PIL.Image.Image: """simple docstring""" A : int = do_resize if do_resize is not None else self.do_resize A : Tuple = crop_pct if crop_pct is not None else self.crop_pct A : Optional[Any] = resample if resample is not None else self.resample A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale A : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor A : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean A : List[str] = image_std if image_std is not None else self.image_std A : Union[str, Any] = size if size is not None else self.size A : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : Any = make_list_of_images(SCREAMING_SNAKE_CASE ) if not valid_images(SCREAMING_SNAKE_CASE ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError('''crop_pct must be specified if size < 384.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. A : Optional[int] = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images] if do_resize: A : Any = [self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , crop_pct=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: A : str = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images] if do_normalize: A : Dict = [self.normalize(image=SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE ) for image in images] A : Any = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images] A : Optional[int] = {'''pixel_values''': images} return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
3
1
'''simple docstring''' import random import sys import numpy as np from matplotlib import pyplot as plt from matplotlib.colors import ListedColormap lowercase : Optional[int] = 'Usage of script: script_name <size_of_canvas:int>' lowercase : Any = [0] * 1_00 + [1] * 10 random.shuffle(choice) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : List[str] = [[False for i in range(snake_case__ )] for j in range(snake_case__ )] return canvas def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' for i, row in enumerate(snake_case__ ): for j, _ in enumerate(snake_case__ ): A : Tuple = bool(random.getrandbits(1 ) ) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : List[str] = np.array(snake_case__ ) A : Dict = np.array(create_canvas(current_canvas.shape[0] ) ) for r, row in enumerate(snake_case__ ): for c, pt in enumerate(snake_case__ ): A : List[str] = __judge_point( snake_case__ , current_canvas[r - 1 : r + 2, c - 1 : c + 2] ) A : Union[str, Any] = next_gen_canvas del next_gen_canvas # cleaning memory as we move on. A : list[list[bool]] = current_canvas.tolist() return return_canvas def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Union[str, Any] = 0 A : int = 0 # finding dead or alive neighbours count. for i in neighbours: for status in i: if status: alive += 1 else: dead += 1 # handling duplicate entry for focus pt. if pt: alive -= 1 else: dead -= 1 # running the rules of game here. A : Union[str, Any] = pt if pt: if alive < 2: A : Optional[int] = False elif alive == 2 or alive == 3: A : Optional[int] = True elif alive > 3: A : str = False else: if alive == 3: A : Optional[Any] = True return state if __name__ == "__main__": if len(sys.argv) != 2: raise Exception(usage_doc) lowercase : Optional[int] = int(sys.argv[1]) # main working structure of this module. lowercase : Optional[int] = create_canvas(canvas_size) seed(c) lowercase , lowercase : str = plt.subplots() fig.show() lowercase : Any = ListedColormap(['w', 'k']) try: while True: lowercase : List[str] = run(c) ax.matshow(c, cmap=cmap) fig.canvas.draw() ax.cla() except KeyboardInterrupt: # do nothing. pass
3
'''simple docstring''' import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(SCREAMING_SNAKE_CASE ): A : int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[str] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> int: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: with self.subTest(SCREAMING_SNAKE_CASE ): A : Any = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Any = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: A : Optional[int] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = FlaxBertModel.from_pretrained(SCREAMING_SNAKE_CASE ) A : Optional[Any] = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() @slow def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: A : List[str] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE ) A : int = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''bert-base is not a local folder and is not a valid model identifier''' ): A : List[Any] = FlaxAutoModel.from_pretrained('''bert-base''' ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): A : Optional[int] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE , revision='''aaaaaa''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ): A : List[str] = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" with self.assertRaisesRegex(SCREAMING_SNAKE_CASE , '''Use `from_pt=True` to load this model''' ): A : Any = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
3
1
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 lowercase : List[str] = get_tests_dir('fixtures') class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[Any] = mock.Mock() A : Dict = 500 A : List[Any] = {} A : Optional[Any] = HTTPError A : Optional[Any] = {} # Download this model to make sure it's in the cache. A : List[str] = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=SCREAMING_SNAKE_CASE ) as mock_head: A : Optional[Any] = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # This check we did call the fake head request mock_head.assert_called() def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[Any] = ViTImageProcessor.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" with self.assertRaises(SCREAMING_SNAKE_CASE ): # config is in subfolder, the following should not work without specifying the subfolder A : Tuple = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' ) A : List[Any] = AutoImageProcessor.from_pretrained( '''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) @is_staging_test class A ( unittest.TestCase ): @classmethod def __lowerCAmelCase ( cls ) -> Union[str, Any]: """simple docstring""" A : List[Any] = TOKEN HfFolder.save_token(SCREAMING_SNAKE_CASE ) @classmethod def __lowerCAmelCase ( cls ) -> Union[str, Any]: """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-image-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' ) except HTTPError: pass def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : List[str] = ViTImageProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token ) A : Dict = ViTImageProcessor.from_pretrained(F'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( SCREAMING_SNAKE_CASE , repo_id='''test-image-processor''' , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token ) A : Tuple = ViTImageProcessor.from_pretrained(F'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Tuple = ViTImageProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token ) A : Union[str, Any] = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( SCREAMING_SNAKE_CASE , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token ) A : Dict = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" CustomImageProcessor.register_for_auto_class() A : List[Any] = CustomImageProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , ) A : Optional[int] = AutoImageProcessor.from_pretrained( F'{USER}/test-dynamic-image-processor' , trust_remote_code=SCREAMING_SNAKE_CASE ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
3
'''simple docstring''' import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowercase : Union[str, Any] = { 'text_branch': 'text_model', 'audio_branch': 'audio_model.audio_encoder', 'attn': 'attention.self', 'self.proj': 'output.dense', 'attention.self_mask': 'attn_mask', 'mlp.fc1': 'intermediate.dense', 'mlp.fc2': 'output.dense', 'norm1': 'layernorm_before', 'norm2': 'layernorm_after', 'bn0': 'batch_norm', } lowercase : Tuple = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc') def lowerCAmelCase_ ( snake_case__ , snake_case__=False ): '''simple docstring''' A, A : Tuple = create_model( '''HTSAT-tiny''' , '''roberta''' , snake_case__ , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=snake_case__ , fusion_type='''aff_2d''' if enable_fusion else None , ) return model, model_cfg def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Dict = {} A : str = R'''.*sequential.(\d+).*''' A : Union[str, Any] = R'''.*_projection.(\d+).*''' for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: A : Any = key.replace(snake_case__ , snake_case__ ) if re.match(snake_case__ , snake_case__ ): # replace sequential layers with list A : Any = re.match(snake_case__ , snake_case__ ).group(1 ) A : List[str] = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(snake_case__ )//3}.linear.' ) elif re.match(snake_case__ , snake_case__ ): A : Union[str, Any] = int(re.match(snake_case__ , snake_case__ ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... A : str = 1 if projecton_layer == 0 else 2 A : Optional[Any] = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' ) if "audio" and "qkv" in key: # split qkv into query key and value A : int = value A : List[Any] = mixed_qkv.size(0 ) // 3 A : Union[str, Any] = mixed_qkv[:qkv_dim] A : Optional[int] = mixed_qkv[qkv_dim : qkv_dim * 2] A : Optional[int] = mixed_qkv[qkv_dim * 2 :] A : Tuple = query_layer A : Union[str, Any] = key_layer A : Optional[int] = value_layer else: A : Dict = value return model_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' A, A : int = init_clap(snake_case__ , enable_fusion=snake_case__ ) clap_model.eval() A : str = clap_model.state_dict() A : Union[str, Any] = rename_state_dict(snake_case__ ) A : Tuple = ClapConfig() A : str = enable_fusion A : str = ClapModel(snake_case__ ) # ignore the spectrogram embedding layer model.load_state_dict(snake_case__ , strict=snake_case__ ) model.save_pretrained(snake_case__ ) transformers_config.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase : List[str] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not') lowercase : Tuple = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
3
1
'''simple docstring''' from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : Tuple = logging.get_logger(__name__) lowercase : Union[str, Any] = { 'huggingface/informer-tourism-monthly': ( 'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json' ), # See all Informer models at https://huggingface.co/models?filter=informer } class A ( __snake_case ): __magic_name__ = '''informer''' __magic_name__ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = "student_t" , SCREAMING_SNAKE_CASE = "nll" , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = "mean" , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 64 , SCREAMING_SNAKE_CASE = 32 , SCREAMING_SNAKE_CASE = 32 , SCREAMING_SNAKE_CASE = 2 , SCREAMING_SNAKE_CASE = 2 , SCREAMING_SNAKE_CASE = 2 , SCREAMING_SNAKE_CASE = 2 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = "gelu" , SCREAMING_SNAKE_CASE = 0.05 , SCREAMING_SNAKE_CASE = 0.1 , SCREAMING_SNAKE_CASE = 0.1 , SCREAMING_SNAKE_CASE = 0.1 , SCREAMING_SNAKE_CASE = 0.1 , SCREAMING_SNAKE_CASE = 100 , SCREAMING_SNAKE_CASE = 0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE = "prob" , SCREAMING_SNAKE_CASE = 5 , SCREAMING_SNAKE_CASE = True , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" A : Any = prediction_length A : Dict = context_length or prediction_length A : List[Any] = distribution_output A : List[str] = loss A : int = input_size A : List[Any] = num_time_features A : str = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] A : List[str] = scaling A : Any = num_dynamic_real_features A : str = num_static_real_features A : Optional[int] = num_static_categorical_features # set cardinality if cardinality and num_static_categorical_features > 0: if len(SCREAMING_SNAKE_CASE ) != num_static_categorical_features: raise ValueError( '''The cardinality should be a list of the same length as `num_static_categorical_features`''' ) A : str = cardinality else: A : List[Any] = [0] # set embedding_dimension if embedding_dimension and num_static_categorical_features > 0: if len(SCREAMING_SNAKE_CASE ) != num_static_categorical_features: raise ValueError( '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' ) A : int = embedding_dimension else: A : List[str] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] A : int = num_parallel_samples # Transformer architecture configuration A : str = input_size * len(self.lags_sequence ) + self._number_of_features A : Dict = d_model A : int = encoder_attention_heads A : Optional[Any] = decoder_attention_heads A : Union[str, Any] = encoder_ffn_dim A : int = decoder_ffn_dim A : Tuple = encoder_layers A : List[str] = decoder_layers A : Optional[int] = dropout A : List[Any] = attention_dropout A : List[Any] = activation_dropout A : int = encoder_layerdrop A : str = decoder_layerdrop A : Optional[Any] = activation_function A : List[str] = init_std A : List[str] = use_cache # Informer A : List[Any] = attention_type A : Tuple = sampling_factor A : Dict = distil super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
3
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging lowercase : Dict = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise if not is_sharded: A : Union[str, Any] = os.path.abspath(snake_case__ ) logger.info(F'Loading PyTorch weights from {pt_path}' ) A : Any = torch.load(snake_case__ , map_location='''cpu''' ) logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' ) A : List[str] = convert_pytorch_state_dict_to_flax(snake_case__ , snake_case__ ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files A : Any = convert_pytorch_sharded_state_dict_to_flax(snake_case__ , snake_case__ ) return flax_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' def is_key_or_prefix_key_in_dict(snake_case__ ) -> bool: return len(set(snake_case__ ) & {key, (model_prefix,) + key} ) > 0 # layer norm A : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean A : Tuple = pt_tuple_key[:-1] + ('''mean''',) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var A : Dict = pt_tuple_key[:-1] + ('''var''',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # embedding A : Any = pt_tuple_key[:-1] + ('''embedding''',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # conv layer A : Optional[Any] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(snake_case__ ): A : List[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer A : Optional[int] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(snake_case__ ): A : str = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight A : Dict = pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias A : List[Any] = pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 A : Dict = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): A : List[Any] = pt_tuple_key[-2] + '''_g''' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): A : List[str] = pt_tuple_key[-2] + '''_v''' if name is not None: A : int = pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = {k: v.numpy() for k, v in pt_state_dict.items()} A : int = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: A : List[str] = flax_model.params['''params'''] else: A : Dict = flax_model.params A : List[Any] = flatten_dict(snake_case__ ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : List[str] = flatten_dict(flax_model.params['''batch_stats'''] ) random_flax_state_dict.update(snake_case__ ) A : int = {} A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : int = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : str = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : Union[str, Any] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Any = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Dict = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : Any = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: A : Tuple = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : List[str] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch # Load the index A : Union[str, Any] = {} for shard_file in shard_filenames: # load using msgpack utils A : List[str] = torch.load(snake_case__ ) A : int = {k: v.numpy() for k, v in pt_state_dict.items()} A : Tuple = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : Optional[int] = flax_model.params['''params'''] A : List[Any] = flatten_dict(snake_case__ ) random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) ) else: A : Dict = flax_model.params A : Tuple = flatten_dict(snake_case__ ) A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : List[str] = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : int = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : List[str] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Union[str, Any] = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Any = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : int = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue if "var" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = os.path.abspath(snake_case__ ) logger.info(F'Loading Flax weights from {flax_checkpoint_path}' ) # import correct flax class A : List[str] = getattr(snake_case__ , '''Flax''' + model.__class__.__name__ ) # load flax weight dict with open(snake_case__ , '''rb''' ) as state_f: try: A : int = from_bytes(snake_case__ , state_f.read() ) except UnpicklingError: raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights A : List[str] = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values() if any(snake_case__ ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) A : Optional[Any] = jax.tree_util.tree_map( lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ ) A : Union[str, Any] = flatten_dict(snake_case__ ) A : List[Any] = pt_model.state_dict() A : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) A : Tuple = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys A : int = [] A : Any = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): A : Union[str, Any] = flax_key_tuple[0] == pt_model.base_model_prefix A : int = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: A : List[str] = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: A : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case__ ) not in pt_model_dict: # conv layer A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) A : Optional[int] = jnp.transpose(snake_case__ , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ) not in pt_model_dict: # linear layer A : Tuple = flax_key_tuple[:-1] + ('''weight''',) A : Tuple = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_mean''',) elif "var" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_var''',) if "batch_stats" in flax_state: A : List[Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: A : Union[str, Any] = '''.'''.join(snake_case__ ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. A : int = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: A : Optional[int] = key.split('''.''' ) A : Dict = None if key_components[-3::2] == ["parametrizations", "original0"]: A : List[str] = key_components[-2] + '''_g''' elif key_components[-3::2] == ["parametrizations", "original1"]: A : List[Any] = key_components[-2] + '''_v''' if name is not None: A : str = key_components[:-3] + [name] A : Optional[Any] = '''.'''.join(snake_case__ ) A : Optional[Any] = key if flax_key in special_pt_names: A : Optional[Any] = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ' F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) else: # add weight to pytorch dict A : Dict = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor A : Dict = torch.from_numpy(snake_case__ ) # remove from missing keys missing_keys.remove(snake_case__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(snake_case__ ) pt_model.load_state_dict(snake_case__ ) # re-transform missing_keys to list A : List[Any] = list(snake_case__ ) if len(snake_case__ ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) else: logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' ) if len(snake_case__ ) > 0: logger.warning( F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' ''' use it for predictions and inference.''' ) else: logger.warning( F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n' '''If your task is similar to the task the model of the checkpoint was trained on, ''' F'you can already use {pt_model.__class__.__name__} for predictions without further training.' ) return pt_model
3
1
'''simple docstring''' from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class A ( __snake_case ): __magic_name__ = DistilBertTokenizer __magic_name__ = DistilBertTokenizerFast __magic_name__ = True @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : List[Any] = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' ) A : Dict = tokenizer.encode('''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE ) A : List[str] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE ) A : Optional[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE ) A : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
3
'''simple docstring''' import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint lowercase : Optional[int] = { '169M': 12, '430M': 24, '1B5': 24, '3B': 32, '7B': 32, '14B': 40, } lowercase : Optional[Any] = { '169M': 7_68, '430M': 10_24, '1B5': 20_48, '3B': 25_60, '7B': 40_96, '14B': 51_20, } def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Optional[Any] = list(state_dict.keys() ) for name in state_dict_keys: A : str = state_dict.pop(snake_case__ ) # emb -> embedding if name.startswith('''emb.''' ): A : Optional[Any] = name.replace('''emb.''' , '''embeddings.''' ) # ln_0 -> pre_ln (only present at block 0) if name.startswith('''blocks.0.ln0''' ): A : Union[str, Any] = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' ) # att -> attention A : int = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , snake_case__ ) # ffn -> feed_forward A : List[Any] = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , snake_case__ ) # time_mix_k -> time_mix_key and reshape if name.endswith('''.time_mix_k''' ): A : List[str] = name.replace('''.time_mix_k''' , '''.time_mix_key''' ) # time_mix_v -> time_mix_value and reshape if name.endswith('''.time_mix_v''' ): A : Union[str, Any] = name.replace('''.time_mix_v''' , '''.time_mix_value''' ) # time_mix_r -> time_mix_key and reshape if name.endswith('''.time_mix_r''' ): A : Union[str, Any] = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' ) if name != "head.weight": A : List[Any] = '''rwkv.''' + name A : Dict = weight return state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=False , snake_case__=None ): '''simple docstring''' if tokenizer_file is None: print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' ) A : int = 5_0277 A : Optional[int] = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' ) else: A : str = PreTrainedTokenizerFast(tokenizer_file=snake_case__ ) A : Any = len(snake_case__ ) tokenizer.save_pretrained(snake_case__ ) # 2. Build the config A : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: A : List[str] = candidate break if size is None: raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' ) if size not in possible_sizes: raise ValueError(F'`size` should be one of {possible_sizes}, got {size}.' ) A : Any = RwkvConfig( vocab_size=snake_case__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(snake_case__ ) # 3. Download model file then convert state_dict A : Union[str, Any] = hf_hub_download(snake_case__ , snake_case__ ) A : Tuple = torch.load(snake_case__ , map_location='''cpu''' ) A : List[Any] = convert_state_dict(snake_case__ ) # 4. Split in shards and save A, A : List[str] = shard_checkpoint(snake_case__ ) for shard_file, shard in shards.items(): torch.save(snake_case__ , os.path.join(snake_case__ , snake_case__ ) ) if index is not None: A : Dict = os.path.join(snake_case__ , snake_case__ ) # Save the index as well with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: A : List[Any] = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '''\n''' f.write(snake_case__ ) # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( '''Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.''' ) A : List[Any] = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: A : Union[str, Any] = torch.load(os.path.join(snake_case__ , snake_case__ ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(snake_case__ , snake_case__ ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' ) A : int = AutoModelForCausalLM.from_pretrained(snake_case__ ) model.push_to_hub(snake_case__ , max_shard_size='''2GB''' ) tokenizer.push_to_hub(snake_case__ ) if __name__ == "__main__": lowercase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.' ) parser.add_argument( '--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.' ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='Where to save the converted model.' ) parser.add_argument( '--tokenizer_file', default=None, type=str, help='Path to the tokenizer file to use (if not provided, only the model is converted).', ) parser.add_argument( '--size', default=None, type=str, help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Push to the Hub the converted model.', ) parser.add_argument( '--model_name', default=None, type=str, help='Name of the pushed model on the Hub, including the username / organization.', ) lowercase : Union[str, Any] = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
3
1
'''simple docstring''' from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class A ( __snake_case ): __magic_name__ = '''new-model''' if is_tf_available(): class A ( __snake_case ): __magic_name__ = NewModelConfig @require_tf class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Dict = '''bert-base-cased''' A : Union[str, Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Any = TFAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Optional[int] = '''bert-base-cased''' A : Any = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Optional[int] = TFAutoModelForPreTraining.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> str: """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : List[Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Tuple = TFAutoModelForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE ) A, A : Dict = TFAutoModelForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE , output_loading_info=SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Any = TFAutoModelWithLMHead.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Tuple = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : int = TFAutoModelForMaskedLM.from_pretrained(SCREAMING_SNAKE_CASE ) A, A : str = TFAutoModelForMaskedLM.from_pretrained(SCREAMING_SNAKE_CASE , output_loading_info=SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE ) A, A : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE , output_loading_info=SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for model_name in ["bert-base-uncased"]: A : List[str] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : int = TFAutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> int: """simple docstring""" for model_name in ["bert-base-uncased"]: A : Tuple = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Optional[int] = TFAutoModelForQuestionAnswering.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow @require_tensorflow_probability def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: A : Optional[int] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[str] = TFAutoModelForTableQuestionAnswering.from_pretrained(SCREAMING_SNAKE_CASE ) A, A : int = TFAutoModelForTableQuestionAnswering.from_pretrained( SCREAMING_SNAKE_CASE , output_loading_info=SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Dict = TFAutoModelWithLMHead.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=SCREAMING_SNAKE_CASE ) , 14410 ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=SCREAMING_SNAKE_CASE ) , 14410 ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : int = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Optional[int] = copy.deepcopy(model.config ) A : str = ['''FunnelBaseModel'''] A : Any = TFAutoModel.from_config(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(SCREAMING_SNAKE_CASE ) A : Dict = TFAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" try: AutoConfig.register('''new-model''' , SCREAMING_SNAKE_CASE ) A : List[str] = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(SCREAMING_SNAKE_CASE ): auto_class.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) auto_class.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(SCREAMING_SNAKE_CASE ): auto_class.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Now that the config is registered, it can be used as any other config with the auto-API A : Union[str, Any] = BertModelTester(self ).get_config() A : List[Any] = NewModelConfig(**tiny_config.to_dict() ) A : List[str] = auto_class.from_config(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[Any] = auto_class.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''bert-base is not a local folder and is not a valid model identifier''' ): A : Optional[int] = TFAutoModel.from_pretrained('''bert-base''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): A : List[str] = TFAutoModel.from_pretrained(SCREAMING_SNAKE_CASE , revision='''aaaaaa''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ): A : Any = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" with self.assertRaisesRegex(SCREAMING_SNAKE_CASE , '''Use `from_pt=True` to load this model''' ): A : Dict = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : List[Any] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: A : Union[str, Any] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint A : Any = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) with RequestCounter() as counter: A : List[Any] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
3
'''simple docstring''' from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING lowercase : str = logging.get_logger(__name__) @add_end_docstrings(__snake_case ) class A ( __snake_case ): def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) self.check_model_type(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A, A : Dict = {}, {} if padding is not None: A : List[str] = padding if truncation is not None: A : Dict = truncation if top_k is not None: A : Optional[Any] = top_k return preprocess_params, {}, postprocess_params def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE , (Image.Image, str) ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : int = {'''image''': image, '''question''': question} else: A : Any = image A : Any = super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) return results def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" A : Union[str, Any] = load_image(inputs['''image'''] ) A : Optional[Any] = self.tokenizer( inputs['''question'''] , return_tensors=self.framework , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE ) A : Dict = self.image_processor(images=SCREAMING_SNAKE_CASE , return_tensors=self.framework ) model_inputs.update(SCREAMING_SNAKE_CASE ) return model_inputs def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : List[Any] = self.model(**SCREAMING_SNAKE_CASE ) return model_outputs def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=5 ) -> int: """simple docstring""" if top_k > self.model.config.num_labels: A : Dict = self.model.config.num_labels if self.framework == "pt": A : Optional[int] = model_outputs.logits.sigmoid()[0] A, A : int = probs.topk(SCREAMING_SNAKE_CASE ) else: raise ValueError(F'Unsupported framework: {self.framework}' ) A : int = scores.tolist() A : List[str] = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )]
3
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase : Union[str, Any] = logging.get_logger(__name__) lowercase : str = { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json', 'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json', 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json', 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json', 'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json', 'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json', 'cl-tohoku/bert-base-japanese-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json' ), 'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json', # See all BERT models at https://huggingface.co/models?filter=bert } class A ( __snake_case ): __magic_name__ = '''bert''' def __init__( self , SCREAMING_SNAKE_CASE=30522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Optional[int]: """simple docstring""" super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = vocab_size A : Optional[Any] = hidden_size A : List[Any] = num_hidden_layers A : List[str] = num_attention_heads A : Dict = hidden_act A : Optional[Any] = intermediate_size A : List[Any] = hidden_dropout_prob A : List[Any] = attention_probs_dropout_prob A : Optional[Any] = max_position_embeddings A : List[str] = type_vocab_size A : Dict = initializer_range A : str = layer_norm_eps A : int = position_embedding_type A : Dict = use_cache A : str = classifier_dropout class A ( __snake_case ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": A : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: A : Optional[int] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
3
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase : Union[str, Any] = logging.get_logger(__name__) lowercase : str = { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json', 'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json', 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json', 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json', 'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json', 'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json', 'cl-tohoku/bert-base-japanese-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json' ), 'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json', # See all BERT models at https://huggingface.co/models?filter=bert } class A ( __snake_case ): __magic_name__ = '''bert''' def __init__( self , SCREAMING_SNAKE_CASE=30522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Optional[int]: """simple docstring""" super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = vocab_size A : Optional[Any] = hidden_size A : List[Any] = num_hidden_layers A : List[str] = num_attention_heads A : Dict = hidden_act A : Optional[Any] = intermediate_size A : List[Any] = hidden_dropout_prob A : List[Any] = attention_probs_dropout_prob A : Optional[Any] = max_position_embeddings A : List[str] = type_vocab_size A : Dict = initializer_range A : str = layer_norm_eps A : int = position_embedding_type A : Dict = use_cache A : str = classifier_dropout class A ( __snake_case ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": A : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: A : Optional[int] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
3
1
'''simple docstring''' def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' return round(float(moles / volume ) * nfactor ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' return round(float((moles * 0.08_21 * temperature) / (volume) ) ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' return round(float((moles * 0.08_21 * temperature) / (pressure) ) ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' return round(float((pressure * volume) / (0.08_21 * moles) ) ) if __name__ == "__main__": import doctest doctest.testmod()
3
'''simple docstring''' import requests from bsa import BeautifulSoup def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : str = BeautifulSoup(requests.get(snake_case__ , params=snake_case__ ).content , '''html.parser''' ) A : Dict = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} ) A : Optional[int] = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' ) return anchors[2].get_text() if __name__ == "__main__": lowercase : str = { 'title': ( 'Precisely geometry controlled microsupercapacitors for ultrahigh areal ' 'capacitance, volumetric capacitance, and energy density' ), 'journal': 'Chem. Mater.', 'volume': 30, 'pages': '3979-3990', 'year': 20_18, 'hl': 'en', } print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
3
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase : Union[str, Any] = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Union[str, Any] = [ 'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST', 'ViTMSNModel', 'ViTMSNForImageClassification', 'ViTMSNPreTrainedModel', ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys lowercase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
3
'''simple docstring''' class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Any = None A : Optional[Any] = None A : Tuple = graph self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Dict = len(SCREAMING_SNAKE_CASE ) A : Optional[Any] = None def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" if sources is int: A : Dict = [sources] if sinks is int: A : str = [sinks] if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0: return A : Optional[int] = sources[0] A : Union[str, Any] = sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1: A : Optional[int] = 0 for i in sources: max_input_flow += sum(self.graph[i] ) A : Dict = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: A : Dict = max_input_flow A : Tuple = 0 A : Tuple = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: A : Optional[Any] = max_input_flow A : Optional[Any] = size - 1 def __lowerCAmelCase ( self ) -> Any: """simple docstring""" if self.maximum_flow_algorithm is None: raise Exception('''You need to set maximum flow algorithm before.''' ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : List[Any] = algorithm(self ) class A : def __init__( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Union[str, Any] = flow_network A : Optional[Any] = flow_network.verticesCount A : Tuple = flow_network.sourceIndex A : Dict = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that A : str = flow_network.graph A : Optional[Any] = False def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" if not self.executed: self._algorithm() A : Optional[int] = True def __lowerCAmelCase ( self ) -> Any: """simple docstring""" pass class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE ) # use this to save your result A : List[str] = -1 def __lowerCAmelCase ( self ) -> str: """simple docstring""" if not self.executed: raise Exception('''You should execute algorithm before using its result!''' ) return self.maximum_flow class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE ) A : Optional[Any] = [[0] * self.verticies_count for i in range(self.verticies_count )] A : Union[str, Any] = [0] * self.verticies_count A : List[Any] = [0] * self.verticies_count def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Tuple = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule A : Optional[Any] = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list A : Union[str, Any] = 0 while i < len(SCREAMING_SNAKE_CASE ): A : str = vertices_list[i] A : List[str] = self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) ) A : int = 0 else: i += 1 A : Optional[Any] = sum(self.preflow[self.source_index] ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.relabel(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Dict = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : Dict = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): A : Dict = self.heights[to_index] if min_height is not None: A : Dict = min_height + 1 if __name__ == "__main__": lowercase : Optional[int] = [0] lowercase : List[Any] = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] lowercase : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network lowercase : List[str] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate lowercase : List[str] = flow_network.find_maximum_flow() print(f'''maximum flow is {maximum_flow}''')
3
1
'''simple docstring''' import argparse import requests import torch from PIL import Image from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Dict = SwinConfig(image_size=192 ) if "base" in model_name: A : int = 6 A : Optional[int] = 128 A : Optional[int] = (2, 2, 18, 2) A : int = (4, 8, 16, 32) elif "large" in model_name: A : List[Any] = 12 A : List[Any] = 192 A : Optional[Any] = (2, 2, 18, 2) A : List[str] = (6, 12, 24, 48) else: raise ValueError('''Model not supported, only supports base and large variants''' ) A : List[Any] = window_size A : Tuple = embed_dim A : Optional[int] = depths A : List[str] = num_heads return config def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if "encoder.mask_token" in name: A : int = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' ) if "encoder.patch_embed.proj" in name: A : Dict = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "encoder.patch_embed.norm" in name: A : Any = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' ) if "attn.proj" in name: A : Optional[Any] = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: A : int = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: A : Union[str, Any] = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: A : Union[str, Any] = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: A : List[Any] = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: A : Dict = name.replace('''mlp.fc2''' , '''output.dense''' ) if name == "encoder.norm.weight": A : str = '''layernorm.weight''' if name == "encoder.norm.bias": A : int = '''layernorm.bias''' if "decoder" in name: pass else: A : Tuple = '''swin.''' + name return name def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): A : Tuple = orig_state_dict.pop(snake_case__ ) if "attn_mask" in key: pass elif "qkv" in key: A : List[str] = key.split('''.''' ) A : int = int(key_split[2] ) A : Tuple = int(key_split[4] ) A : List[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: A : List[str] = val[:dim, :] A : List[Any] = val[ dim : dim * 2, : ] A : str = val[-dim:, :] else: A : Any = val[ :dim ] A : Optional[int] = val[ dim : dim * 2 ] A : Optional[Any] = val[ -dim: ] else: A : Tuple = val return orig_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : Optional[int] = torch.load(snake_case__ , map_location='''cpu''' )['''model'''] A : Union[str, Any] = get_swin_config(snake_case__ ) A : Union[str, Any] = SwinForMaskedImageModeling(snake_case__ ) model.eval() A : Dict = convert_state_dict(snake_case__ , snake_case__ ) model.load_state_dict(snake_case__ ) A : List[str] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' A : Optional[Any] = ViTImageProcessor(size={'''height''': 192, '''width''': 192} ) A : List[str] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) A : Optional[Any] = image_processor(images=snake_case__ , return_tensors='''pt''' ) with torch.no_grad(): A : Any = model(**snake_case__ ).logits print(outputs.keys() ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(F'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case__ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(snake_case__ ) if push_to_hub: print(F'Pushing model and image processor for {model_name} to hub' ) model.push_to_hub(F'microsoft/{model_name}' ) image_processor.push_to_hub(F'microsoft/{model_name}' ) if __name__ == "__main__": lowercase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='swin-base-simmim-window6-192', type=str, choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'], help='Name of the Swin SimMIM model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth', type=str, help='Path to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) lowercase : Optional[Any] = parser.parse_args() convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
3
'''simple docstring''' def lowerCAmelCase_ ( snake_case__ = 10 ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ) or n < 0: raise ValueError('''Invalid input''' ) A : List[str] = 10**n A : Tuple = 2_8433 * (pow(2 , 783_0457 , snake_case__ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(f'''{solution(10) = }''')
3
1
'''simple docstring''' import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Union[str, Any] = [] A : Union[str, Any] = [] for i in range(self.num_layers ): A : Any = self.in_channels if i == 0 else self.out_channels A : Optional[Any] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Optional[int] = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnets A : Union[str, Any] = attentions if self.add_downsample: A : int = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Union[str, Any]: """simple docstring""" A : Optional[Any] = () for resnet, attn in zip(self.resnets , self.attentions ): A : int = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Dict = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[Any] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Optional[Any] = [] for i in range(self.num_layers ): A : Optional[Any] = self.in_channels if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets if self.add_downsample: A : Dict = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]: """simple docstring""" A : str = () for resnet in self.resnets: A : Optional[int] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[int] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[Any] = [] A : Optional[int] = [] for i in range(self.num_layers ): A : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : Dict = self.prev_output_channel if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : int = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets A : Optional[Any] = attentions if self.add_upsample: A : Optional[int] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[int]: """simple docstring""" for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states A : List[str] = res_hidden_states_tuple[-1] A : int = res_hidden_states_tuple[:-1] A : List[str] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Tuple = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : Dict = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : int = [] for i in range(self.num_layers ): A : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : List[str] = self.prev_output_channel if i == 0 else self.out_channels A : str = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[Any] = resnets if self.add_upsample: A : Optional[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Tuple: """simple docstring""" for resnet in self.resnets: # pop res hidden states A : Optional[int] = res_hidden_states_tuple[-1] A : Optional[Any] = res_hidden_states_tuple[:-1] A : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Optional[Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : List[str] = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : str = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] A : List[Any] = [] for _ in range(self.num_layers ): A : int = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[str] = resnets A : List[str] = attentions def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Dict: """simple docstring""" A : Optional[Any] = self.resnets[0](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): A : Optional[int] = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) return hidden_states
3
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging lowercase : List[str] = logging.get_logger(__name__) lowercase : str = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class A ( __snake_case ): __magic_name__ = '''gpt_neo''' __magic_name__ = ['''past_key_values'''] __magic_name__ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , SCREAMING_SNAKE_CASE=50257 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=24 , SCREAMING_SNAKE_CASE=[[["global", "local"], 12]] , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=50256 , SCREAMING_SNAKE_CASE=50256 , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" A : Union[str, Any] = vocab_size A : Optional[Any] = max_position_embeddings A : Dict = hidden_size A : Optional[Any] = num_layers A : Tuple = num_heads A : int = intermediate_size A : Optional[Any] = window_size A : List[Any] = activation_function A : Union[str, Any] = resid_dropout A : Any = embed_dropout A : List[Any] = attention_dropout A : str = classifier_dropout A : List[Any] = layer_norm_epsilon A : str = initializer_range A : List[str] = use_cache A : Optional[int] = bos_token_id A : List[Any] = eos_token_id A : int = attention_types A : int = self.expand_attention_types_params(SCREAMING_SNAKE_CASE ) if len(self.attention_layers ) != self.num_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.attention_layers)` == `config.num_layers` ''' F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, ' F'`config.num_layers = {self.num_layers}`. ' '''`config.attention_layers` is prepared using `config.attention_types`. ''' '''Please verify the value of `config.attention_types` argument.''' ) super().__init__(bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : List[str] = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' import torch A : Tuple = input.size() A : Union[str, Any] = len(snake_case__ ) A : List[str] = shape[dimension] A : Union[str, Any] = torch.arange(0 , snake_case__ , snake_case__ ) A : List[str] = torch.div(sizedim - size , snake_case__ , rounding_mode='''floor''' ) + 1 A : Optional[int] = torch.arange(snake_case__ ) + low_indices[:min_length][:, None] A : str = [slice(snake_case__ )] * rank A : List[Any] = indices A : Union[str, Any] = input[s] A : List[str] = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch A : List[str] = torch.arange(1 , snake_case__ ) A : Optional[int] = torch.remainder(snake_case__ , snake_case__ ) A : Optional[int] = remainders == 0 A : Optional[Any] = candidates[divisor_indices] A : Optional[int] = torch.max(snake_case__ ) return largest_divisor, torch.div(snake_case__ , snake_case__ , rounding_mode='''floor''' ) class A ( __snake_case ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" A : Tuple = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE , direction='''inputs''' ) A : Optional[Any] = {0: '''batch''', 1: '''past_sequence + sequence'''} else: A : Dict = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return self._config.num_heads def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]: """simple docstring""" A : List[str] = super(SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs( SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE ) # We need to order the input in the way they appears in the forward() A : Any = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch A, A : Dict = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values A : str = seqlen + 2 A : List[Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) A : Any = [ (torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers ) ] A : str = common_inputs['''attention_mask'''] if self.use_past: A : Optional[int] = ordered_inputs['''attention_mask'''].dtype A : List[str] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )] , dim=1 ) return ordered_inputs @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return 13
3
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class A ( __snake_case , __snake_case , unittest.TestCase ): __magic_name__ = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) __magic_name__ = ( { '''feature-extraction''': TFMobileBertModel, '''fill-mask''': TFMobileBertForMaskedLM, '''question-answering''': TFMobileBertForQuestionAnswering, '''text-classification''': TFMobileBertForSequenceClassification, '''token-classification''': TFMobileBertForTokenClassification, '''zero-shot''': TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) __magic_name__ = False __magic_name__ = False def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> List[str]: """simple docstring""" A : Tuple = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE ) if return_labels: if model_class in get_values(SCREAMING_SNAKE_CASE ): A : int = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , ) -> str: """simple docstring""" A : List[str] = parent A : str = batch_size A : Optional[Any] = seq_length A : List[str] = is_training A : List[Any] = use_input_mask A : Optional[Any] = use_token_type_ids A : Optional[Any] = use_labels A : List[str] = vocab_size A : Dict = hidden_size A : Union[str, Any] = num_hidden_layers A : Tuple = num_attention_heads A : Dict = intermediate_size A : Tuple = hidden_act A : List[Any] = hidden_dropout_prob A : Tuple = attention_probs_dropout_prob A : int = max_position_embeddings A : int = type_vocab_size A : str = type_sequence_label_size A : int = initializer_range A : Optional[Any] = num_labels A : Optional[int] = num_choices A : Tuple = scope A : Dict = embedding_size def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Union[str, Any] = None if self.use_input_mask: A : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) A : Dict = None if self.use_token_type_ids: A : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A : Tuple = None A : str = None A : Any = None if self.use_labels: A : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) A : Optional[Any] = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" A : int = TFMobileBertModel(config=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A : List[str] = model(SCREAMING_SNAKE_CASE ) A : str = [input_ids, input_mask] A : List[str] = model(SCREAMING_SNAKE_CASE ) A : Dict = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" A : Optional[int] = TFMobileBertForMaskedLM(config=SCREAMING_SNAKE_CASE ) A : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A : Dict = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" A : Union[str, Any] = TFMobileBertForNextSentencePrediction(config=SCREAMING_SNAKE_CASE ) A : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A : Optional[Any] = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : str = TFMobileBertForPreTraining(config=SCREAMING_SNAKE_CASE ) A : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A : str = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" A : str = self.num_labels A : Tuple = TFMobileBertForSequenceClassification(config=SCREAMING_SNAKE_CASE ) A : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A : Dict = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" A : Dict = self.num_choices A : Dict = TFMobileBertForMultipleChoice(config=SCREAMING_SNAKE_CASE ) A : Tuple = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) ) A : int = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) ) A : Union[str, Any] = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) ) A : Tuple = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } A : Optional[Any] = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Dict = self.num_labels A : Optional[Any] = TFMobileBertForTokenClassification(config=SCREAMING_SNAKE_CASE ) A : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A : str = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : Union[str, Any] = TFMobileBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE ) A : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A : Any = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : List[str] = self.prepare_config_and_inputs() ( ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ) : Any = config_and_inputs A : List[str] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[int] = TFMobileBertModelTest.TFMobileBertModelTester(self ) A : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" for model_name in ["google/mobilebert-uncased"]: A : Optional[int] = TFMobileBertModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) @require_tf class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Optional[Any] = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' ) A : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] ) A : str = model(SCREAMING_SNAKE_CASE )[0] A : Dict = [1, 6, 30522] self.assertEqual(output.shape , SCREAMING_SNAKE_CASE ) A : Union[str, Any] = tf.constant( [ [ [-4.5_919_547, -9.248_295, -9.645_256], [-6.7_306_175, -6.440_284, -6.6_052_837], [-7.2_743_506, -6.7_847_915, -6.024_673], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 )
3
'''simple docstring''' import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Union[str, Any] = [] A : Union[str, Any] = [] for i in range(self.num_layers ): A : Any = self.in_channels if i == 0 else self.out_channels A : Optional[Any] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Optional[int] = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnets A : Union[str, Any] = attentions if self.add_downsample: A : int = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Union[str, Any]: """simple docstring""" A : Optional[Any] = () for resnet, attn in zip(self.resnets , self.attentions ): A : int = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Dict = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[Any] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Optional[Any] = [] for i in range(self.num_layers ): A : Optional[Any] = self.in_channels if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets if self.add_downsample: A : Dict = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]: """simple docstring""" A : str = () for resnet in self.resnets: A : Optional[int] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[int] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[Any] = [] A : Optional[int] = [] for i in range(self.num_layers ): A : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : Dict = self.prev_output_channel if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : int = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets A : Optional[Any] = attentions if self.add_upsample: A : Optional[int] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[int]: """simple docstring""" for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states A : List[str] = res_hidden_states_tuple[-1] A : int = res_hidden_states_tuple[:-1] A : List[str] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Tuple = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : Dict = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : int = [] for i in range(self.num_layers ): A : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : List[str] = self.prev_output_channel if i == 0 else self.out_channels A : str = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[Any] = resnets if self.add_upsample: A : Optional[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Tuple: """simple docstring""" for resnet in self.resnets: # pop res hidden states A : Optional[int] = res_hidden_states_tuple[-1] A : Optional[Any] = res_hidden_states_tuple[:-1] A : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Optional[Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : List[str] = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : str = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] A : List[Any] = [] for _ in range(self.num_layers ): A : int = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[str] = resnets A : List[str] = attentions def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Dict: """simple docstring""" A : Optional[Any] = self.resnets[0](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): A : Optional[int] = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) return hidden_states
3
1
'''simple docstring''' from ..utils import DummyObject, requires_backends class A ( metaclass=__snake_case ): __magic_name__ = ['''transformers''', '''torch''', '''note_seq'''] def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def __lowerCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def __lowerCAmelCase ( cls , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
3
'''simple docstring''' import os def lowerCAmelCase_ ( ): '''simple docstring''' A : List[Any] = os.path.join(os.path.dirname(snake_case__ ) , '''num.txt''' ) with open(snake_case__ ) as file_hand: return str(sum(int(snake_case__ ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
3
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : Any = logging.get_logger(__name__) lowercase : Optional[Any] = { 'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json', 'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json', } class A ( __snake_case ): __magic_name__ = '''markuplm''' def __init__( self , SCREAMING_SNAKE_CASE=30522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE=1024 , SCREAMING_SNAKE_CASE=216 , SCREAMING_SNAKE_CASE=1001 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=50 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Dict: """simple docstring""" super().__init__( pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) A : List[Any] = vocab_size A : List[str] = hidden_size A : Dict = num_hidden_layers A : int = num_attention_heads A : Any = hidden_act A : str = intermediate_size A : Optional[Any] = hidden_dropout_prob A : Tuple = attention_probs_dropout_prob A : str = max_position_embeddings A : str = type_vocab_size A : Any = initializer_range A : Tuple = layer_norm_eps A : List[Any] = position_embedding_type A : Tuple = use_cache A : Union[str, Any] = classifier_dropout # additional properties A : int = max_depth A : str = max_xpath_tag_unit_embeddings A : Optional[Any] = max_xpath_subs_unit_embeddings A : Union[str, Any] = tag_pad_id A : Tuple = subs_pad_id A : Dict = xpath_unit_hidden_size
3
'''simple docstring''' import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] ) @pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , '''IN_MEMORY_MAX_SIZE''' , snake_case__ ) A : Dict = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: A : Dict = dataset_size < in_memory_max_size else: A : Tuple = False A : int = is_small_dataset(snake_case__ ) assert result == expected
3
1
'''simple docstring''' import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class A ( __snake_case ): __magic_name__ = ['''image_processor''', '''tokenizer'''] __magic_name__ = '''OwlViTImageProcessor''' __magic_name__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" A : Optional[int] = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , SCREAMING_SNAKE_CASE , ) A : Optional[int] = kwargs.pop('''feature_extractor''' ) A : Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __call__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="max_length" , SCREAMING_SNAKE_CASE="np" , **SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" if text is None and query_images is None and images is None: raise ValueError( '''You have to specify at least one text or query image or image. All three cannot be none.''' ) if text is not None: if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or (isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and not isinstance(text[0] , SCREAMING_SNAKE_CASE )): A : List[Any] = [self.tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )] elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and isinstance(text[0] , SCREAMING_SNAKE_CASE ): A : str = [] # Maximum number of queries across batch A : List[Any] = max([len(SCREAMING_SNAKE_CASE ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(SCREAMING_SNAKE_CASE ) != max_num_queries: A : Dict = t + [''' '''] * (max_num_queries - len(SCREAMING_SNAKE_CASE )) A : int = self.tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) encodings.append(SCREAMING_SNAKE_CASE ) else: raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' ) if return_tensors == "np": A : str = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) A : int = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp A : str = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) A : Optional[int] = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch A : Any = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 ) A : Optional[Any] = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf A : Tuple = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) A : Optional[Any] = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) else: raise ValueError('''Target return tensor type could not be returned''' ) A : List[Any] = BatchEncoding() A : Optional[int] = input_ids A : int = attention_mask if query_images is not None: A : int = BatchEncoding() A : List[str] = self.image_processor( SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).pixel_values A : int = query_pixel_values if images is not None: A : Union[str, Any] = self.image_processor(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) if text is not None and images is not None: A : Dict = image_features.pixel_values return encoding elif query_images is not None and images is not None: A : Tuple = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE ) , tensor_type=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" return self.image_processor.post_process(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" return self.image_processor.post_process_object_detection(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" return self.image_processor.post_process_image_guided_detection(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" return self.tokenizer.decode(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @property def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , SCREAMING_SNAKE_CASE , ) return self.image_processor_class @property def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , SCREAMING_SNAKE_CASE , ) return self.image_processor
3
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class A ( __snake_case ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
3
1
'''simple docstring''' def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' _validate_point(snake_case__ ) _validate_point(snake_case__ ) if len(snake_case__ ) != len(snake_case__ ): raise ValueError('''Both points must be in the same n-dimensional space''' ) return float(sum(abs(a - b ) for a, b in zip(snake_case__ , snake_case__ ) ) ) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if point: if isinstance(snake_case__ , snake_case__ ): for item in point: if not isinstance(snake_case__ , (int, float) ): A : Optional[Any] = ( '''Expected a list of numbers as input, found ''' F'{type(snake_case__ ).__name__}' ) raise TypeError(snake_case__ ) else: A : int = F'Expected a list of numbers as input, found {type(snake_case__ ).__name__}' raise TypeError(snake_case__ ) else: raise ValueError('''Missing an input''' ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' _validate_point(snake_case__ ) _validate_point(snake_case__ ) if len(snake_case__ ) != len(snake_case__ ): raise ValueError('''Both points must be in the same n-dimensional space''' ) return float(sum(abs(x - y ) for x, y in zip(snake_case__ , snake_case__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
3
'''simple docstring''' from scipy.stats import pearsonr import datasets lowercase : Optional[int] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n' lowercase : Optional[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n' lowercase : str = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: """simple docstring""" if return_pvalue: A : Union[str, Any] = pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )}
3
1
'''simple docstring''' import operator as op lowercase : List[Any] = 'scaler.pt' lowercase : Dict = 'pytorch_model' lowercase : int = 'random_states' lowercase : List[str] = 'optimizer' lowercase : List[Any] = 'scheduler' lowercase : int = 'pytorch_model.bin' lowercase : Optional[int] = 'pytorch_model.bin.index.json' lowercase : str = 'model.safetensors' lowercase : List[str] = 'model.safetensors.index.json' lowercase : Optional[int] = '1.10.2' lowercase : Optional[Any] = 'py38' lowercase : List[Any] = '4.17.0' lowercase : Tuple = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge'] lowercase : Optional[Any] = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2'] lowercase : Dict = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP'] lowercase : int = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH'] lowercase : str = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] lowercase : int = '2.0.1' lowercase : int = ['pdsh', 'standard', 'openmpi', 'mvapich'] lowercase : Union[str, Any] = ['default', 'reduce-overhead', 'max-autotune'] lowercase : str = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt} # These are the args for `torch.distributed.launch` for pytorch < 1.9 lowercase : int = [ 'nnodes', 'nproc_per_node', 'rdzv_backend', 'rdzv_endpoint', 'rdzv_id', 'rdzv_conf', 'standalone', 'max_restarts', 'monitor_interval', 'start_method', 'role', 'module', 'm', 'no_python', 'run_path', 'log_dir', 'r', 'redirects', 't', 'tee', 'node_rank', 'master_addr', 'master_port', ] lowercase : List[Any] = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM'] lowercase : Optional[Any] = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
3
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) lowercase : Dict = { 'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'], 'processing_speech_to_text': ['Speech2TextProcessor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[Any] = ['Speech2TextTokenizer'] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[str] = ['Speech2TextFeatureExtractor'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Dict = [ 'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFSpeech2TextForConditionalGeneration', 'TFSpeech2TextModel', 'TFSpeech2TextPreTrainedModel', ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Any = [ 'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Speech2TextForConditionalGeneration', 'Speech2TextModel', 'Speech2TextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys lowercase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
3
1
'''simple docstring''' from ..utils import is_flax_available, is_torch_available if is_torch_available(): from .autoencoder_kl import AutoencoderKL from .controlnet import ControlNetModel from .dual_transformer_ad import DualTransformeraDModel from .modeling_utils import ModelMixin from .prior_transformer import PriorTransformer from .ta_film_transformer import TaFilmDecoder from .transformer_ad import TransformeraDModel from .unet_ad import UNetaDModel from .unet_ad import UNetaDModel from .unet_ad_condition import UNetaDConditionModel from .unet_ad_condition import UNetaDConditionModel from .vq_model import VQModel if is_flax_available(): from .controlnet_flax import FlaxControlNetModel from .unet_ad_condition_flax import FlaxUNetaDConditionModel from .vae_flax import FlaxAutoencoderKL
3
'''simple docstring''' import os import sys import unittest lowercase : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) lowercase : Any = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py') lowercase : Optional[int] = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py') class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Any = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : List[Any] = {'''BertModelTest''': '''BertModelTester'''} A : int = { '''BlipModelTest''': '''BlipModelTester''', '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''', '''BlipTextModelTest''': '''BlipTextModelTester''', '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''', '''BlipVQAModelTest''': '''BlipVQAModelTester''', '''BlipVisionModelTest''': '''BlipVisionModelTester''', } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : List[str] = { '''BertForMaskedLM''': ['''BertModelTest'''], '''BertForMultipleChoice''': ['''BertModelTest'''], '''BertForNextSentencePrediction''': ['''BertModelTest'''], '''BertForPreTraining''': ['''BertModelTest'''], '''BertForQuestionAnswering''': ['''BertModelTest'''], '''BertForSequenceClassification''': ['''BertModelTest'''], '''BertForTokenClassification''': ['''BertModelTest'''], '''BertLMHeadModel''': ['''BertModelTest'''], '''BertModel''': ['''BertModelTest'''], } A : Union[str, Any] = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''], '''BlipModel''': ['''BlipModelTest'''], '''BlipTextModel''': ['''BlipTextModelTest'''], '''BlipVisionModel''': ['''BlipVisionModelTest'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : int = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Dict = { '''BertForMaskedLM''': ['''BertModelTester'''], '''BertForMultipleChoice''': ['''BertModelTester'''], '''BertForNextSentencePrediction''': ['''BertModelTester'''], '''BertForPreTraining''': ['''BertModelTester'''], '''BertForQuestionAnswering''': ['''BertModelTester'''], '''BertForSequenceClassification''': ['''BertModelTester'''], '''BertForTokenClassification''': ['''BertModelTester'''], '''BertLMHeadModel''': ['''BertModelTester'''], '''BertModel''': ['''BertModelTester'''], } A : str = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''], '''BlipModel''': ['''BlipModelTester'''], '''BlipTextModel''': ['''BlipTextModelTester'''], '''BlipVisionModel''': ['''BlipVisionModelTester'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
3
1
'''simple docstring''' def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' ) if len(snake_case__ ) == 0: raise ValueError('''Input list must be a non empty list''' ) if len(snake_case__ ) == 1: return True A : Any = series[1] - series[0] for index in range(len(snake_case__ ) - 1 ): if series[index + 1] - series[index] != common_diff: return False return True def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): raise ValueError('''Input series is not valid, valid series - [2, 4, 6]''' ) if len(snake_case__ ) == 0: raise ValueError('''Input list must be a non empty list''' ) A : Optional[Any] = 0 for val in series: answer += val return answer / len(snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod()
3
'''simple docstring''' from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class A ( __snake_case ): __magic_name__ = DistilBertTokenizer __magic_name__ = DistilBertTokenizerFast __magic_name__ = True @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : List[Any] = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' ) A : Dict = tokenizer.encode('''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE ) A : List[str] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE ) A : Optional[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE ) A : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
3
1
'''simple docstring''' import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets lowercase : Any = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n' lowercase : Any = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n' lowercase : List[Any] = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/ROUGE_(metric)''', '''https://github.com/google-research/google-research/tree/master/rouge''', ] , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False ) -> List[str]: """simple docstring""" if rouge_types is None: A : Union[str, Any] = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum'''] A : Any = rouge_scorer.RougeScorer(rouge_types=SCREAMING_SNAKE_CASE , use_stemmer=SCREAMING_SNAKE_CASE ) if use_aggregator: A : List[str] = scoring.BootstrapAggregator() else: A : Any = [] for ref, pred in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : List[str] = scorer.score(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if use_aggregator: aggregator.add_scores(SCREAMING_SNAKE_CASE ) else: scores.append(SCREAMING_SNAKE_CASE ) if use_aggregator: A : int = aggregator.aggregate() else: A : List[str] = {} for key in scores[0]: A : Optional[Any] = [score[key] for score in scores] return result
3
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase : Optional[int] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''input_features''', '''attention_mask'''] def __init__( self , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=16000 , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" super().__init__(feature_size=SCREAMING_SNAKE_CASE , sampling_rate=SCREAMING_SNAKE_CASE , padding_value=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = num_mel_bins A : Tuple = do_ceptral_normalize A : Dict = normalize_means A : List[Any] = normalize_vars A : List[str] = True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : List[Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers A : Any = torch.from_numpy(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) A : Any = ta_kaldi.fbank(SCREAMING_SNAKE_CASE , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 0.0 , ) -> np.ndarray: """simple docstring""" if normalize_means: A : Dict = x[:input_length].mean(axis=0 ) A : Optional[Any] = np.subtract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if normalize_vars: A : str = x[:input_length].std(axis=0 ) A : int = np.divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if input_length < x.shape[0]: A : List[str] = padding_value # make sure array is in float32 A : Tuple = x.astype(np.floataa ) return x def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[np.ndarray]: """simple docstring""" A : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' F' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) A : List[Any] = isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}' ) A : Tuple = is_batched_numpy or ( isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ): A : Union[str, Any] = np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A : Any = [raw_speech] # extract fbank features A : List[str] = [self._extract_fbank_features(SCREAMING_SNAKE_CASE ) for waveform in raw_speech] # convert into correct format for padding A : str = BatchFeature({'''input_features''': features} ) A : Union[str, Any] = self.pad( SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) # make sure list is in array format A : List[str] = padded_inputs.get('''input_features''' ) if isinstance(input_features[0] , SCREAMING_SNAKE_CASE ): A : str = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features] A : Union[str, Any] = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: A : Dict = ( np.array(SCREAMING_SNAKE_CASE , dtype=np.intaa ) if self._get_padding_strategies(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE ) is not PaddingStrategy.DO_NOT_PAD else None ) A : List[Any] = self.normalize( padded_inputs['''input_features'''] , attention_mask=SCREAMING_SNAKE_CASE ) if return_tensors is not None: A : int = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE ) return padded_inputs
3
1
'''simple docstring''' from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class A ( nn.Module ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = "geglu" , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = "layer_norm" , SCREAMING_SNAKE_CASE = False , ) -> Any: """simple docstring""" super().__init__() A : int = only_cross_attention A : Dict = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero''' A : Dict = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm''' if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( F'`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to' F' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.' ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: A : Any = AdaLayerNorm(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) elif self.use_ada_layer_norm_zero: A : List[str] = AdaLayerNormZero(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else: A : List[str] = nn.LayerNorm(SCREAMING_SNAKE_CASE , elementwise_affine=SCREAMING_SNAKE_CASE ) A : Optional[Any] = Attention( query_dim=SCREAMING_SNAKE_CASE , heads=SCREAMING_SNAKE_CASE , dim_head=SCREAMING_SNAKE_CASE , dropout=SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=SCREAMING_SNAKE_CASE , ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. A : Optional[Any] = ( AdaLayerNorm(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if self.use_ada_layer_norm else nn.LayerNorm(SCREAMING_SNAKE_CASE , elementwise_affine=SCREAMING_SNAKE_CASE ) ) A : Optional[Any] = Attention( query_dim=SCREAMING_SNAKE_CASE , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=SCREAMING_SNAKE_CASE , dim_head=SCREAMING_SNAKE_CASE , dropout=SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE , upcast_attention=SCREAMING_SNAKE_CASE , ) # is self-attn if encoder_hidden_states is none else: A : int = None A : Optional[Any] = None # 3. Feed-forward A : Optional[int] = nn.LayerNorm(SCREAMING_SNAKE_CASE , elementwise_affine=SCREAMING_SNAKE_CASE ) A : Dict = FeedForward(SCREAMING_SNAKE_CASE , dropout=SCREAMING_SNAKE_CASE , activation_fn=SCREAMING_SNAKE_CASE , final_dropout=SCREAMING_SNAKE_CASE ) # let chunk size default to None A : Dict = None A : Optional[int] = 0 def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : Union[str, Any] = chunk_size A : Dict = dim def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , ) -> Dict: """simple docstring""" if self.use_ada_layer_norm: A : str = self.norma(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) elif self.use_ada_layer_norm_zero: A, A, A, A, A : int = self.norma( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hidden_dtype=hidden_states.dtype ) else: A : int = self.norma(SCREAMING_SNAKE_CASE ) A : Optional[int] = cross_attention_kwargs if cross_attention_kwargs is not None else {} A : Any = self.attna( SCREAMING_SNAKE_CASE , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) if self.use_ada_layer_norm_zero: A : Any = gate_msa.unsqueeze(1 ) * attn_output A : List[Any] = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: A : Optional[int] = ( self.norma(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if self.use_ada_layer_norm else self.norma(SCREAMING_SNAKE_CASE ) ) A : Dict = self.attna( SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) A : List[str] = attn_output + hidden_states # 3. Feed-forward A : Union[str, Any] = self.norma(SCREAMING_SNAKE_CASE ) if self.use_ada_layer_norm_zero: A : Any = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( F'`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.' ) A : Any = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size A : Tuple = torch.cat( [self.ff(SCREAMING_SNAKE_CASE ) for hid_slice in norm_hidden_states.chunk(SCREAMING_SNAKE_CASE , dim=self._chunk_dim )] , dim=self._chunk_dim , ) else: A : Optional[int] = self.ff(SCREAMING_SNAKE_CASE ) if self.use_ada_layer_norm_zero: A : str = gate_mlp.unsqueeze(1 ) * ff_output A : Optional[int] = ff_output + hidden_states return hidden_states class A ( nn.Module ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 4 , SCREAMING_SNAKE_CASE = 0.0 , SCREAMING_SNAKE_CASE = "geglu" , SCREAMING_SNAKE_CASE = False , ) -> int: """simple docstring""" super().__init__() A : int = int(dim * mult ) A : Tuple = dim_out if dim_out is not None else dim if activation_fn == "gelu": A : Any = GELU(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if activation_fn == "gelu-approximate": A : int = GELU(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , approximate='''tanh''' ) elif activation_fn == "geglu": A : List[str] = GEGLU(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) elif activation_fn == "geglu-approximate": A : Union[str, Any] = ApproximateGELU(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Optional[Any] = nn.ModuleList([] ) # project in self.net.append(SCREAMING_SNAKE_CASE ) # project dropout self.net.append(nn.Dropout(SCREAMING_SNAKE_CASE ) ) # project out self.net.append(nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout if final_dropout: self.net.append(nn.Dropout(SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" for module in self.net: A : List[Any] = module(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = "none" ) -> Tuple: """simple docstring""" super().__init__() A : int = nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Dict = approximate def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" if gate.device.type != "mps": return F.gelu(SCREAMING_SNAKE_CASE , approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" A : str = self.proj(SCREAMING_SNAKE_CASE ) A : Tuple = self.gelu(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" super().__init__() A : str = nn.Linear(SCREAMING_SNAKE_CASE , dim_out * 2 ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" if gate.device.type != "mps": return F.gelu(SCREAMING_SNAKE_CASE ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" A, A : Optional[int] = self.proj(SCREAMING_SNAKE_CASE ).chunk(2 , dim=-1 ) return hidden_states * self.gelu(SCREAMING_SNAKE_CASE ) class A ( nn.Module ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" super().__init__() A : Optional[int] = nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : Union[str, Any] = self.proj(SCREAMING_SNAKE_CASE ) return x * torch.sigmoid(1.702 * x ) class A ( nn.Module ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" super().__init__() A : Any = nn.Embedding(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Dict = nn.SiLU() A : Union[str, Any] = nn.Linear(SCREAMING_SNAKE_CASE , embedding_dim * 2 ) A : Union[str, Any] = nn.LayerNorm(SCREAMING_SNAKE_CASE , elementwise_affine=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" A : List[Any] = self.linear(self.silu(self.emb(SCREAMING_SNAKE_CASE ) ) ) A, A : str = torch.chunk(SCREAMING_SNAKE_CASE , 2 ) A : List[str] = self.norm(SCREAMING_SNAKE_CASE ) * (1 + scale) + shift return x class A ( nn.Module ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" super().__init__() A : List[Any] = CombinedTimestepLabelEmbeddings(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[str] = nn.SiLU() A : Optional[int] = nn.Linear(SCREAMING_SNAKE_CASE , 6 * embedding_dim , bias=SCREAMING_SNAKE_CASE ) A : Any = nn.LayerNorm(SCREAMING_SNAKE_CASE , elementwise_affine=SCREAMING_SNAKE_CASE , eps=1e-6 ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ) -> Any: """simple docstring""" A : Tuple = self.linear(self.silu(self.emb(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hidden_dtype=SCREAMING_SNAKE_CASE ) ) ) A, A, A, A, A, A : str = emb.chunk(6 , dim=1 ) A : Union[str, Any] = self.norm(SCREAMING_SNAKE_CASE ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class A ( nn.Module ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 1e-5 ) -> Optional[int]: """simple docstring""" super().__init__() A : str = num_groups A : Union[str, Any] = eps if act_fn is None: A : Optional[int] = None else: A : Dict = get_activation(SCREAMING_SNAKE_CASE ) A : Tuple = nn.Linear(SCREAMING_SNAKE_CASE , out_dim * 2 ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" if self.act: A : Dict = self.act(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = self.linear(SCREAMING_SNAKE_CASE ) A : Optional[int] = emb[:, :, None, None] A, A : Dict = emb.chunk(2 , dim=1 ) A : Dict = F.group_norm(SCREAMING_SNAKE_CASE , self.num_groups , eps=self.eps ) A : Optional[int] = x * (1 + scale) + shift return x
3
'''simple docstring''' import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 lowercase : str = get_tests_dir('fixtures/dummy_feature_extractor_config.json') lowercase : str = get_tests_dir('fixtures/vocab.json') lowercase : int = get_tests_dir('fixtures') class A ( unittest.TestCase ): __magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = 0 def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : List[Any] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : Union[str, Any] = WavaVecaConfig() A : List[str] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) # save in new folder model_config.save_pretrained(SCREAMING_SNAKE_CASE ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) ) A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : Dict = WavaVecaFeatureExtractor() A : List[str] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(SCREAMING_SNAKE_CASE ) # drop `processor_class` in tokenizer with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f: A : Dict = json.load(SCREAMING_SNAKE_CASE ) config_dict.pop('''processor_class''' ) with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE ) ) A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : List[Any] = WavaVecaFeatureExtractor() A : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(SCREAMING_SNAKE_CASE ) # drop `processor_class` in feature extractor with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f: A : str = json.load(SCREAMING_SNAKE_CASE ) config_dict.pop('''processor_class''' ) with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE ) ) A : str = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : str = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' ) model_config.save_pretrained(SCREAMING_SNAKE_CASE ) # copy relevant files copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) ) # create emtpy sample processor with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write('''{}''' ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" with self.assertRaises(SCREAMING_SNAKE_CASE ): A : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(SCREAMING_SNAKE_CASE ): A : Union[str, Any] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) A : List[str] = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) A : Tuple = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version A : List[str] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE ) A : int = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(SCREAMING_SNAKE_CASE ): AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Now that the config is registered, it can be used as any other config with the auto-API A : List[Any] = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: A : Tuple = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : Optional[int] = CustomTokenizer(SCREAMING_SNAKE_CASE ) A : Any = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" class A ( __snake_case ): __magic_name__ = False class A ( __snake_case ): __magic_name__ = False class A ( __snake_case ): __magic_name__ = '''AutoFeatureExtractor''' __magic_name__ = '''AutoTokenizer''' __magic_name__ = False try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # If remote code is not set, the default is to use local classes. A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. A : Optional[int] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. A : Tuple = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : int = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Optional[int] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' ) self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' ) @is_staging_test class A ( unittest.TestCase ): __magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def __lowerCAmelCase ( cls ) -> Dict: """simple docstring""" A : Optional[int] = TOKEN HfFolder.save_token(SCREAMING_SNAKE_CASE ) @classmethod def __lowerCAmelCase ( cls ) -> Any: """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' ) except HTTPError: pass def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Union[str, Any] = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(SCREAMING_SNAKE_CASE , '''test-processor''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token ) A : int = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Tuple = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(SCREAMING_SNAKE_CASE , '''test-processor-org''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token , organization='''valid_org''' , ) A : int = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() A : Any = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: A : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : str = CustomTokenizer(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(F'{USER}/test-dynamic-processor' , token=self._token ) A : List[str] = Repository(SCREAMING_SNAKE_CASE , clone_from=F'{USER}/test-dynamic-processor' , token=self._token ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { '''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''', '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) ) as f: A : Dict = json.load(SCREAMING_SNAKE_CASE ) self.assertDictEqual( tokenizer_config['''auto_map'''] , { '''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None], '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_feature_extraction.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_tokenization.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_processing.py''' ) ) ) repo.push_to_hub() A : Optional[int] = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' , trust_remote_code=SCREAMING_SNAKE_CASE ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
3
1
'''simple docstring''' def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' ) for i in range(snake_case__ ): for j in range(snake_case__ ): if dist[i][j] != float('''inf''' ): print(int(dist[i][j] ) , end='''\t''' ) else: print('''INF''' , end='''\t''' ) print() def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Optional[Any] = [[float('''inf''' ) for _ in range(snake_case__ )] for _ in range(snake_case__ )] for i in range(snake_case__ ): for j in range(snake_case__ ): A : List[Any] = graph[i][j] # check vertex k against all other vertices (i, j) for k in range(snake_case__ ): # looping through rows of graph array for i in range(snake_case__ ): # looping through columns of graph array for j in range(snake_case__ ): if ( dist[i][k] != float('''inf''' ) and dist[k][j] != float('''inf''' ) and dist[i][k] + dist[k][j] < dist[i][j] ): A : List[str] = dist[i][k] + dist[k][j] _print_dist(snake_case__ , snake_case__ ) return dist, v if __name__ == "__main__": lowercase : Dict = int(input('Enter number of vertices: ')) lowercase : Union[str, Any] = int(input('Enter number of edges: ')) lowercase : int = [[float('inf') for i in range(v)] for j in range(v)] for i in range(v): lowercase : Any = 0.0 # src and dst are indices that must be within the array size graph[e][v] # failure to follow this will result in an error for i in range(e): print('\nEdge ', i + 1) lowercase : Union[str, Any] = int(input('Enter source:')) lowercase : Optional[int] = int(input('Enter destination:')) lowercase : Tuple = float(input('Enter weight:')) lowercase : List[str] = weight floyd_warshall(graph, v) # Example Input # Enter number of vertices: 3 # Enter number of edges: 2 # # generated graph from vertex and edge inputs # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]] # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]] # specify source, destination and weight for edge #1 # Edge 1 # Enter source:1 # Enter destination:2 # Enter weight:2 # specify source, destination and weight for edge #2 # Edge 2 # Enter source:2 # Enter destination:1 # Enter weight:1 # # Expected Output from the vertice, edge and src, dst, weight inputs!! # 0 INF INF # INF 0 2 # INF 1 0
3
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: lowercase : Optional[Any] = None lowercase : Tuple = logging.get_logger(__name__) lowercase : Dict = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'} lowercase : Tuple = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, 'tokenizer_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json', }, } lowercase : List[str] = { 'google/rembert': 2_56, } lowercase : Dict = '▁' class A ( __snake_case ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = RemBertTokenizer def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[MASK]" , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" A : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token super().__init__( SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) A : List[Any] = do_lower_case A : str = remove_space A : int = keep_accents A : Union[str, Any] = vocab_file A : List[Any] = False if not self.vocab_file else True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : List[Any] = [self.sep_token_id] A : Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : Tuple = [self.sep_token_id] A : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error('''Vocabulary path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE ) ) return A : Any = os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
3
1
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class A ( __snake_case ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
3
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowercase : Optional[Any] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''pixel_values'''] def __init__( self , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 1 / 255 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) A : str = size if size is not None else {'''shortest_edge''': 384} A : Tuple = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : str = do_resize A : List[Any] = size # Default value set here for backwards compatibility where the value in config is None A : List[Any] = crop_pct if crop_pct is not None else 224 / 256 A : Optional[int] = resample A : Union[str, Any] = do_rescale A : List[str] = rescale_factor A : Union[str, Any] = do_normalize A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : str = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) if "shortest_edge" not in size: raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' ) A : Any = size['''shortest_edge'''] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct A : Dict = int(shortest_edge / crop_pct ) A : str = get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : int = resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) else: # warping (no cropping) when evaluated at 384 or larger return resize( SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE , ) -> PIL.Image.Image: """simple docstring""" A : int = do_resize if do_resize is not None else self.do_resize A : Tuple = crop_pct if crop_pct is not None else self.crop_pct A : Optional[Any] = resample if resample is not None else self.resample A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale A : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor A : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean A : List[str] = image_std if image_std is not None else self.image_std A : Union[str, Any] = size if size is not None else self.size A : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) A : Any = make_list_of_images(SCREAMING_SNAKE_CASE ) if not valid_images(SCREAMING_SNAKE_CASE ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError('''crop_pct must be specified if size < 384.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. A : Optional[int] = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images] if do_resize: A : Any = [self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , crop_pct=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: A : str = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images] if do_normalize: A : Dict = [self.normalize(image=SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE ) for image in images] A : Any = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images] A : Optional[int] = {'''pixel_values''': images} return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
3
1
'''simple docstring''' import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy lowercase : List[str] = logging.getLogger(__name__) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , ): '''simple docstring''' A : Optional[Any] = bnb_quantization_config.load_in_abit A : Tuple = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,''' ''' make sure you have the latest version of `bitsandbytes` installed.''' ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,''' '''make sure you have the latest version of `bitsandbytes` installed.''' ) A : Optional[Any] = [] # custom device map if isinstance(snake_case__ , snake_case__ ) and len(device_map.keys() ) > 1: A : str = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: A : Optional[int] = get_keys_to_not_convert(snake_case__ ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(snake_case__ ) A : Dict = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: A : Any = [] A : Tuple = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(snake_case__ ) # compatibility with peft A : Union[str, Any] = load_in_abit A : Dict = load_in_abit A : Tuple = get_parameter_device(snake_case__ ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( '''It is not recommended to quantize a loaded model. ''' '''The model should be instantiated under the `init_empty_weights` context manager.''' ) A : Union[str, Any] = replace_with_bnb_layers(snake_case__ , snake_case__ , modules_to_not_convert=snake_case__ ) # convert param to the right dtype A : Dict = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: A : str = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' ) A : str = getattr(snake_case__ , snake_case__ , snake_case__ ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(snake_case__ ): param.to(snake_case__ ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' ) logger.info( F'The model device type is {model_device.type}. However, cuda is needed for quantization.' '''We move the model to cuda.''' ) return model elif weights_location is None: raise RuntimeError( F'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' ) else: with init_empty_weights(): A : Optional[Any] = replace_with_bnb_layers( snake_case__ , snake_case__ , modules_to_not_convert=snake_case__ ) A : Dict = get_quantized_model_device_map( snake_case__ , snake_case__ , snake_case__ , max_memory=snake_case__ , no_split_module_classes=snake_case__ , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): A : List[Any] = True A : Optional[int] = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] ) load_checkpoint_in_model( snake_case__ , snake_case__ , snake_case__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=snake_case__ , offload_state_dict=snake_case__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(snake_case__ , device_map=snake_case__ , offload_dir=snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None ): '''simple docstring''' if device_map is None: if torch.cuda.is_available(): A : List[Any] = {'''''': torch.cuda.current_device()} else: raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' ) logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' ) if isinstance(snake_case__ , snake_case__ ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( '''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or ''' '''\'sequential\'.''' ) A : str = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) A : Union[str, Any] = {} A : Any = special_dtypes A : Dict = no_split_module_classes A : Tuple = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": A : List[Any] = get_balanced_memory( snake_case__ , low_zero=(device_map == '''balanced_low_0''') , max_memory=snake_case__ , **snake_case__ , ) A : Dict = max_memory A : Optional[int] = infer_auto_device_map(snake_case__ , **snake_case__ ) if isinstance(snake_case__ , snake_case__ ): # check if don't have any quantized module on the cpu A : List[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules A : List[str] = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( ''' Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. ''' ) else: logger.info( '''Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' ) del device_map_without_some_modules return device_map def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None ): '''simple docstring''' if modules_to_not_convert is None: A : Optional[int] = [] A, A : Tuple = _replace_with_bnb_layers( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , ): '''simple docstring''' A : Optional[int] = False for name, module in model.named_children(): if current_key_name is None: A : List[str] = [] current_key_name.append(snake_case__ ) if isinstance(snake_case__ , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` A : Union[str, Any] = '''.'''.join(snake_case__ ) A : int = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: A : Optional[int] = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: A : Optional[Any] = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=snake_case__ , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: A : Dict = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' ) A : str = module.weight.data if module.bias is not None: A : Tuple = module.bias.data bnb_module.requires_grad_(snake_case__ ) setattr(snake_case__ , snake_case__ , snake_case__ ) A : Tuple = True if len(list(module.children() ) ) > 0: A, A : List[Any] = _replace_with_bnb_layers( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) A : List[Any] = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' with init_empty_weights(): A : Tuple = deepcopy(snake_case__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` A : Optional[Any] = find_tied_parameters(snake_case__ ) # For compatibility with Accelerate < 0.18 if isinstance(snake_case__ , snake_case__ ): A : Optional[int] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: A : Union[str, Any] = sum(snake_case__ , [] ) A : str = len(snake_case__ ) > 0 # Check if it is a base model A : Optional[int] = False if hasattr(snake_case__ , '''base_model_prefix''' ): A : Tuple = not hasattr(snake_case__ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head A : Tuple = list(model.named_children() ) A : Tuple = [list_modules[-1][0]] # add last module together with tied weights A : Optional[Any] = set(snake_case__ ) - set(snake_case__ ) A : Optional[int] = list(set(snake_case__ ) ) + list(snake_case__ ) # remove ".weight" from the keys A : Tuple = ['''.weight''', '''.bias'''] A : Any = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: A : int = name.replace(snake_case__ , '''''' ) filtered_module_names.append(snake_case__ ) return filtered_module_names def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' for m in model.modules(): if isinstance(snake_case__ , bnb.nn.Linearabit ): return True return False def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' return next(parameter.parameters() ).device def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if fpaa_statistics is None: set_module_tensor_to_device(snake_case__ , snake_case__ , 0 , dtype=snake_case__ , value=snake_case__ ) A : str = param_name A : Tuple = model if "." in tensor_name: A : str = tensor_name.split('''.''' ) for split in splits[:-1]: A : int = getattr(snake_case__ , snake_case__ ) if new_module is None: raise ValueError(F'{module} has no attribute {split}.' ) A : Dict = new_module A : int = splits[-1] # offload weights A : Tuple = False offload_weight(module._parameters[tensor_name] , snake_case__ , snake_case__ , index=snake_case__ ) if hasattr(module._parameters[tensor_name] , '''SCB''' ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , snake_case__ , index=snake_case__ , ) else: offload_weight(snake_case__ , snake_case__ , snake_case__ , index=snake_case__ ) offload_weight(snake_case__ , param_name.replace('''weight''' , '''SCB''' ) , snake_case__ , index=snake_case__ ) set_module_tensor_to_device(snake_case__ , snake_case__ , '''meta''' , dtype=snake_case__ , value=torch.empty(*param.size() ) )
3
'''simple docstring''' import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(SCREAMING_SNAKE_CASE ): A : int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[str] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> int: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: with self.subTest(SCREAMING_SNAKE_CASE ): A : Any = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Any = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" for model_name in ["bert-base-cased", "bert-large-uncased"]: A : Optional[int] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = FlaxBertModel.from_pretrained(SCREAMING_SNAKE_CASE ) A : Optional[Any] = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() @slow def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for model_name in ["roberta-base", "roberta-large"]: A : List[str] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE ) A : int = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**SCREAMING_SNAKE_CASE ): return model(**SCREAMING_SNAKE_CASE ) eval(**SCREAMING_SNAKE_CASE ).block_until_ready() def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''bert-base is not a local folder and is not a valid model identifier''' ): A : List[Any] = FlaxAutoModel.from_pretrained('''bert-base''' ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): A : Optional[int] = FlaxAutoModel.from_pretrained(SCREAMING_SNAKE_CASE , revision='''aaaaaa''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ): A : List[str] = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" with self.assertRaisesRegex(SCREAMING_SNAKE_CASE , '''Use `from_pt=True` to load this model''' ): A : Any = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
3
1
'''simple docstring''' import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Dict = VideoMAEConfig() set_architecture_configs(snake_case__ , snake_case__ ) if "finetuned" not in model_name: A : int = False if "finetuned" in model_name: A : List[Any] = '''huggingface/label-files''' if "kinetics" in model_name: A : int = 400 A : str = '''kinetics400-id2label.json''' elif "ssv2" in model_name: A : Dict = 174 A : Dict = '''something-something-v2-id2label.json''' else: raise ValueError('''Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.''' ) A : Optional[int] = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='''dataset''' ) , '''r''' ) ) A : int = {int(snake_case__ ): v for k, v in idalabel.items()} A : Union[str, Any] = idalabel A : Tuple = {v: k for k, v in idalabel.items()} return config def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' if "small" in model_name: A : int = 384 A : Tuple = 1536 A : Optional[Any] = 12 A : List[str] = 16 A : Optional[Any] = 12 A : Optional[Any] = 3 A : List[Any] = 192 A : List[str] = 768 elif "large" in model_name: A : Optional[int] = 1024 A : Optional[Any] = 4096 A : Tuple = 24 A : List[Any] = 16 A : Tuple = 12 A : Union[str, Any] = 8 A : Optional[int] = 512 A : str = 2048 elif "huge" in model_name: A : List[Any] = 1280 A : Tuple = 5120 A : Tuple = 32 A : Optional[Any] = 16 A : Dict = 12 A : List[str] = 8 A : str = 640 A : Optional[int] = 2560 elif "base" not in model_name: raise ValueError('''Model name should include either "small", "base", "large", or "huge"''' ) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if "encoder." in name: A : List[str] = name.replace('''encoder.''' , '''''' ) if "cls_token" in name: A : Any = name.replace('''cls_token''' , '''videomae.embeddings.cls_token''' ) if "decoder_pos_embed" in name: A : Dict = name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' ) if "pos_embed" in name and "decoder" not in name: A : Tuple = name.replace('''pos_embed''' , '''videomae.embeddings.position_embeddings''' ) if "patch_embed.proj" in name: A : List[str] = name.replace('''patch_embed.proj''' , '''videomae.embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: A : int = name.replace('''patch_embed.norm''' , '''videomae.embeddings.norm''' ) if "decoder.blocks" in name: A : List[Any] = name.replace('''decoder.blocks''' , '''decoder.decoder_layers''' ) if "blocks" in name: A : Union[str, Any] = name.replace('''blocks''' , '''videomae.encoder.layer''' ) if "attn.proj" in name: A : Tuple = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name and "bias" not in name: A : str = name.replace('''attn''' , '''attention.self''' ) if "attn" in name: A : Any = name.replace('''attn''' , '''attention.attention''' ) if "norm1" in name: A : Dict = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: A : List[str] = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: A : List[str] = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: A : Dict = name.replace('''mlp.fc2''' , '''output.dense''' ) if "decoder_embed" in name: A : Tuple = name.replace('''decoder_embed''' , '''decoder.decoder_embed''' ) if "decoder_norm" in name: A : int = name.replace('''decoder_norm''' , '''decoder.decoder_norm''' ) if "decoder_pred" in name: A : List[str] = name.replace('''decoder_pred''' , '''decoder.decoder_pred''' ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: A : List[str] = name.replace('''norm.weight''' , '''videomae.layernorm.weight''' ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: A : str = name.replace('''norm.bias''' , '''videomae.layernorm.bias''' ) if "head" in name and "decoder" not in name: A : Tuple = name.replace('''head''' , '''classifier''' ) return name def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): A : List[Any] = orig_state_dict.pop(snake_case__ ) if key.startswith('''encoder.''' ): A : str = key.replace('''encoder.''' , '''''' ) if "qkv" in key: A : List[Any] = key.split('''.''' ) if key.startswith('''decoder.blocks''' ): A : str = config.decoder_hidden_size A : List[str] = int(key_split[2] ) A : Any = '''decoder.decoder_layers.''' if "weight" in key: A : Optional[Any] = val[:dim, :] A : Union[str, Any] = val[dim : dim * 2, :] A : List[str] = val[-dim:, :] else: A : str = config.hidden_size A : str = int(key_split[1] ) A : Dict = '''videomae.encoder.layer.''' if "weight" in key: A : str = val[:dim, :] A : Tuple = val[dim : dim * 2, :] A : List[str] = val[-dim:, :] else: A : Union[str, Any] = val return orig_state_dict def lowerCAmelCase_ ( ): '''simple docstring''' A : Dict = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' ) A : Any = np.load(snake_case__ ) return list(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : List[Any] = get_videomae_config(snake_case__ ) if "finetuned" in model_name: A : int = VideoMAEForVideoClassification(snake_case__ ) else: A : List[str] = VideoMAEForPreTraining(snake_case__ ) # download original checkpoint, hosted on Google Drive A : str = '''pytorch_model.bin''' gdown.cached_download(snake_case__ , snake_case__ , quiet=snake_case__ ) A : Tuple = torch.load(snake_case__ , map_location='''cpu''' ) if "model" in files: A : Optional[Any] = files['''model'''] else: A : Tuple = files['''module'''] A : Dict = convert_state_dict(snake_case__ , snake_case__ ) model.load_state_dict(snake_case__ ) model.eval() # verify model on basic input A : Union[str, Any] = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) A : Any = prepare_video() A : Union[str, Any] = image_processor(snake_case__ , return_tensors='''pt''' ) if "finetuned" not in model_name: A : str = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' ) A : Optional[Any] = torch.load(snake_case__ ) A : List[str] = model(**snake_case__ ) A : Optional[int] = outputs.logits A : List[str] = [ '''videomae-small-finetuned-kinetics''', '''videomae-small-finetuned-ssv2''', # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) '''videomae-base-short''', '''videomae-base-short-finetuned-kinetics''', '''videomae-base''', '''videomae-base-finetuned-kinetics''', '''videomae-large''', '''videomae-large-finetuned-kinetics''', '''videomae-huge-finetuned-kinetics''', # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) '''videomae-base-short-ssv2''', '''videomae-base-short-finetuned-ssv2''', '''videomae-base-ssv2''', '''videomae-base-finetuned-ssv2''', ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": A : int = torch.Size([1, 400] ) A : List[Any] = torch.tensor([-0.92_91, -0.40_61, -0.93_07] ) elif model_name == "videomae-small-finetuned-ssv2": A : Any = torch.Size([1, 174] ) A : Union[str, Any] = torch.tensor([0.26_71, -0.46_89, -0.82_35] ) elif model_name == "videomae-base": A : List[str] = torch.Size([1, 1408, 1536] ) A : Union[str, Any] = torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] ) elif model_name == "videomae-base-short": A : List[str] = torch.Size([1, 1408, 1536] ) A : Optional[Any] = torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] ) # we verified the loss both for normalized and unnormalized targets for this one A : List[str] = torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] ) elif model_name == "videomae-large": A : List[str] = torch.Size([1, 1408, 1536] ) A : Any = torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] ) elif model_name == "videomae-large-finetuned-kinetics": A : Any = torch.Size([1, 400] ) A : str = torch.tensor([0.07_71, 0.00_11, -0.36_25] ) elif model_name == "videomae-huge-finetuned-kinetics": A : Optional[int] = torch.Size([1, 400] ) A : str = torch.tensor([0.24_33, 0.16_32, -0.48_94] ) elif model_name == "videomae-base-short-finetuned-kinetics": A : List[Any] = torch.Size([1, 400] ) A : List[Any] = torch.tensor([0.65_88, 0.09_90, -0.24_93] ) elif model_name == "videomae-base-finetuned-kinetics": A : Union[str, Any] = torch.Size([1, 400] ) A : List[Any] = torch.tensor([0.36_69, -0.06_88, -0.24_21] ) elif model_name == "videomae-base-short-ssv2": A : Any = torch.Size([1, 1408, 1536] ) A : List[str] = torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] ) elif model_name == "videomae-base-short-finetuned-ssv2": A : Optional[Any] = torch.Size([1, 174] ) A : Optional[Any] = torch.tensor([-0.05_37, -0.15_39, -0.32_66] ) elif model_name == "videomae-base-ssv2": A : str = torch.Size([1, 1408, 1536] ) A : Union[str, Any] = torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] ) elif model_name == "videomae-base-finetuned-ssv2": A : Optional[int] = torch.Size([1, 174] ) A : str = torch.tensor([0.19_61, -0.83_37, -0.63_89] ) else: raise ValueError(F'Model name not supported. Should be one of {model_names}' ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] , snake_case__ , atol=1E-4 ) else: print('''Logits:''' , logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1E-4 ) print('''Logits ok!''' ) # verify loss, if applicable if model_name == "videomae-base-short": A : List[str] = outputs.loss assert torch.allclose(snake_case__ , snake_case__ , atol=1E-4 ) print('''Loss ok!''' ) if pytorch_dump_folder_path is not None: print(F'Saving model and image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(snake_case__ ) model.save_pretrained(snake_case__ ) if push_to_hub: print('''Pushing to the hub...''' ) model.push_to_hub(snake_case__ , organization='''nielsr''' ) if __name__ == "__main__": lowercase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4', type=str, help=( 'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct' ' download link.' ), ) parser.add_argument( '--pytorch_dump_folder_path', default='/Users/nielsrogge/Documents/VideoMAE/Test', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) lowercase : List[Any] = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
3
'''simple docstring''' import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowercase : Union[str, Any] = { 'text_branch': 'text_model', 'audio_branch': 'audio_model.audio_encoder', 'attn': 'attention.self', 'self.proj': 'output.dense', 'attention.self_mask': 'attn_mask', 'mlp.fc1': 'intermediate.dense', 'mlp.fc2': 'output.dense', 'norm1': 'layernorm_before', 'norm2': 'layernorm_after', 'bn0': 'batch_norm', } lowercase : Tuple = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc') def lowerCAmelCase_ ( snake_case__ , snake_case__=False ): '''simple docstring''' A, A : Tuple = create_model( '''HTSAT-tiny''' , '''roberta''' , snake_case__ , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=snake_case__ , fusion_type='''aff_2d''' if enable_fusion else None , ) return model, model_cfg def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Dict = {} A : str = R'''.*sequential.(\d+).*''' A : Union[str, Any] = R'''.*_projection.(\d+).*''' for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: A : Any = key.replace(snake_case__ , snake_case__ ) if re.match(snake_case__ , snake_case__ ): # replace sequential layers with list A : Any = re.match(snake_case__ , snake_case__ ).group(1 ) A : List[str] = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(snake_case__ )//3}.linear.' ) elif re.match(snake_case__ , snake_case__ ): A : Union[str, Any] = int(re.match(snake_case__ , snake_case__ ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... A : str = 1 if projecton_layer == 0 else 2 A : Optional[Any] = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' ) if "audio" and "qkv" in key: # split qkv into query key and value A : int = value A : List[Any] = mixed_qkv.size(0 ) // 3 A : Union[str, Any] = mixed_qkv[:qkv_dim] A : Optional[int] = mixed_qkv[qkv_dim : qkv_dim * 2] A : Optional[int] = mixed_qkv[qkv_dim * 2 :] A : Tuple = query_layer A : Union[str, Any] = key_layer A : Optional[int] = value_layer else: A : Dict = value return model_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' A, A : int = init_clap(snake_case__ , enable_fusion=snake_case__ ) clap_model.eval() A : str = clap_model.state_dict() A : Union[str, Any] = rename_state_dict(snake_case__ ) A : Tuple = ClapConfig() A : str = enable_fusion A : str = ClapModel(snake_case__ ) # ignore the spectrogram embedding layer model.load_state_dict(snake_case__ , strict=snake_case__ ) model.save_pretrained(snake_case__ ) transformers_config.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase : List[str] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not') lowercase : Tuple = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
3
1
'''simple docstring''' import itertools import math def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowerCAmelCase_ ( ): '''simple docstring''' A : List[Any] = 2 while True: if is_prime(snake_case__ ): yield num num += 1 def lowerCAmelCase_ ( snake_case__ = 1_0001 ): '''simple docstring''' return next(itertools.islice(prime_generator() , nth - 1 , snake_case__ ) ) if __name__ == "__main__": print(f'''{solution() = }''')
3
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging lowercase : Dict = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise if not is_sharded: A : Union[str, Any] = os.path.abspath(snake_case__ ) logger.info(F'Loading PyTorch weights from {pt_path}' ) A : Any = torch.load(snake_case__ , map_location='''cpu''' ) logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' ) A : List[str] = convert_pytorch_state_dict_to_flax(snake_case__ , snake_case__ ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files A : Any = convert_pytorch_sharded_state_dict_to_flax(snake_case__ , snake_case__ ) return flax_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' def is_key_or_prefix_key_in_dict(snake_case__ ) -> bool: return len(set(snake_case__ ) & {key, (model_prefix,) + key} ) > 0 # layer norm A : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean A : Tuple = pt_tuple_key[:-1] + ('''mean''',) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var A : Dict = pt_tuple_key[:-1] + ('''var''',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # embedding A : Any = pt_tuple_key[:-1] + ('''embedding''',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # conv layer A : Optional[Any] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(snake_case__ ): A : List[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer A : Optional[int] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(snake_case__ ): A : str = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight A : Dict = pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias A : List[Any] = pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 A : Dict = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): A : List[Any] = pt_tuple_key[-2] + '''_g''' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): A : List[str] = pt_tuple_key[-2] + '''_v''' if name is not None: A : int = pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = {k: v.numpy() for k, v in pt_state_dict.items()} A : int = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: A : List[str] = flax_model.params['''params'''] else: A : Dict = flax_model.params A : List[Any] = flatten_dict(snake_case__ ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : List[str] = flatten_dict(flax_model.params['''batch_stats'''] ) random_flax_state_dict.update(snake_case__ ) A : int = {} A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : int = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : str = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : Union[str, Any] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Any = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Dict = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : Any = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: A : Tuple = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : List[str] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch # Load the index A : Union[str, Any] = {} for shard_file in shard_filenames: # load using msgpack utils A : List[str] = torch.load(snake_case__ ) A : int = {k: v.numpy() for k, v in pt_state_dict.items()} A : Tuple = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: A : Optional[int] = flax_model.params['''params'''] A : List[Any] = flatten_dict(snake_case__ ) random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) ) else: A : Dict = flax_model.params A : Tuple = flatten_dict(snake_case__ ) A : List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) A : List[str] = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): A : int = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary A : List[str] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: A : Union[str, Any] = pt_tuple_key[1:] # Correctly rename weight parameters A, A : Any = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary A : int = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: A : int = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ' F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue if "var" in flax_key[-1]: A : Optional[int] = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown A : Optional[Any] = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = os.path.abspath(snake_case__ ) logger.info(F'Loading Flax weights from {flax_checkpoint_path}' ) # import correct flax class A : List[str] = getattr(snake_case__ , '''Flax''' + model.__class__.__name__ ) # load flax weight dict with open(snake_case__ , '''rb''' ) as state_f: try: A : int = from_bytes(snake_case__ , state_f.read() ) except UnpicklingError: raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' ) return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights A : List[str] = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values() if any(snake_case__ ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) A : Optional[Any] = jax.tree_util.tree_map( lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ ) A : Union[str, Any] = flatten_dict(snake_case__ ) A : List[Any] = pt_model.state_dict() A : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) A : Tuple = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys A : int = [] A : Any = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): A : Union[str, Any] = flax_key_tuple[0] == pt_model.base_model_prefix A : int = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: A : List[str] = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: A : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case__ ) not in pt_model_dict: # conv layer A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) A : Optional[int] = jnp.transpose(snake_case__ , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ) not in pt_model_dict: # linear layer A : Tuple = flax_key_tuple[:-1] + ('''weight''',) A : Tuple = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_mean''',) elif "var" in flax_key_tuple[-1]: A : Tuple = flax_key_tuple[:-1] + ('''running_var''',) if "batch_stats" in flax_state: A : List[Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: A : Union[str, Any] = '''.'''.join(snake_case__ ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. A : int = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: A : Optional[int] = key.split('''.''' ) A : Dict = None if key_components[-3::2] == ["parametrizations", "original0"]: A : List[str] = key_components[-2] + '''_g''' elif key_components[-3::2] == ["parametrizations", "original1"]: A : List[Any] = key_components[-2] + '''_v''' if name is not None: A : str = key_components[:-3] + [name] A : Optional[Any] = '''.'''.join(snake_case__ ) A : Optional[Any] = key if flax_key in special_pt_names: A : Optional[Any] = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ' F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' ) else: # add weight to pytorch dict A : Dict = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor A : Dict = torch.from_numpy(snake_case__ ) # remove from missing keys missing_keys.remove(snake_case__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(snake_case__ ) pt_model.load_state_dict(snake_case__ ) # re-transform missing_keys to list A : List[Any] = list(snake_case__ ) if len(snake_case__ ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing' F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect' ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) else: logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' ) if len(snake_case__ ) > 0: logger.warning( F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly' F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to' ''' use it for predictions and inference.''' ) else: logger.warning( F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n' '''If your task is similar to the task the model of the checkpoint was trained on, ''' F'you can already use {pt_model.__class__.__name__} for predictions without further training.' ) return pt_model
3
1
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" super().__init__() # make sure scheduler can always be converted to DDIM A : Dict = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE ) @torch.no_grad() def __call__( self , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 0.0 , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]: """simple docstring""" if isinstance(self.unet.config.sample_size , SCREAMING_SNAKE_CASE ): A : List[Any] = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: A : List[Any] = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) != batch_size: raise ValueError( F'You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE )}, but requested an effective batch' F' size of {batch_size}. Make sure the batch size matches the length of the generators.' ) A : Tuple = randn_tensor(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output A : Optional[int] = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 A : Union[str, Any] = self.scheduler.step( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , use_clipped_model_output=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample A : List[str] = (image / 2 + 0.5).clamp(0 , 1 ) A : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": A : Optional[Any] = self.numpy_to_pil(SCREAMING_SNAKE_CASE ) if not return_dict: return (image,) return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE )
3
'''simple docstring''' import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint lowercase : Optional[int] = { '169M': 12, '430M': 24, '1B5': 24, '3B': 32, '7B': 32, '14B': 40, } lowercase : Optional[Any] = { '169M': 7_68, '430M': 10_24, '1B5': 20_48, '3B': 25_60, '7B': 40_96, '14B': 51_20, } def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Optional[Any] = list(state_dict.keys() ) for name in state_dict_keys: A : str = state_dict.pop(snake_case__ ) # emb -> embedding if name.startswith('''emb.''' ): A : Optional[Any] = name.replace('''emb.''' , '''embeddings.''' ) # ln_0 -> pre_ln (only present at block 0) if name.startswith('''blocks.0.ln0''' ): A : Union[str, Any] = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' ) # att -> attention A : int = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , snake_case__ ) # ffn -> feed_forward A : List[Any] = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , snake_case__ ) # time_mix_k -> time_mix_key and reshape if name.endswith('''.time_mix_k''' ): A : List[str] = name.replace('''.time_mix_k''' , '''.time_mix_key''' ) # time_mix_v -> time_mix_value and reshape if name.endswith('''.time_mix_v''' ): A : Union[str, Any] = name.replace('''.time_mix_v''' , '''.time_mix_value''' ) # time_mix_r -> time_mix_key and reshape if name.endswith('''.time_mix_r''' ): A : Union[str, Any] = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' ) if name != "head.weight": A : List[Any] = '''rwkv.''' + name A : Dict = weight return state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=False , snake_case__=None ): '''simple docstring''' if tokenizer_file is None: print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' ) A : int = 5_0277 A : Optional[int] = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' ) else: A : str = PreTrainedTokenizerFast(tokenizer_file=snake_case__ ) A : Any = len(snake_case__ ) tokenizer.save_pretrained(snake_case__ ) # 2. Build the config A : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: A : List[str] = candidate break if size is None: raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' ) if size not in possible_sizes: raise ValueError(F'`size` should be one of {possible_sizes}, got {size}.' ) A : Any = RwkvConfig( vocab_size=snake_case__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(snake_case__ ) # 3. Download model file then convert state_dict A : Union[str, Any] = hf_hub_download(snake_case__ , snake_case__ ) A : Tuple = torch.load(snake_case__ , map_location='''cpu''' ) A : List[Any] = convert_state_dict(snake_case__ ) # 4. Split in shards and save A, A : List[str] = shard_checkpoint(snake_case__ ) for shard_file, shard in shards.items(): torch.save(snake_case__ , os.path.join(snake_case__ , snake_case__ ) ) if index is not None: A : Dict = os.path.join(snake_case__ , snake_case__ ) # Save the index as well with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: A : List[Any] = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '''\n''' f.write(snake_case__ ) # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( '''Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.''' ) A : List[Any] = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: A : Union[str, Any] = torch.load(os.path.join(snake_case__ , snake_case__ ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(snake_case__ , snake_case__ ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' ) A : int = AutoModelForCausalLM.from_pretrained(snake_case__ ) model.push_to_hub(snake_case__ , max_shard_size='''2GB''' ) tokenizer.push_to_hub(snake_case__ ) if __name__ == "__main__": lowercase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.' ) parser.add_argument( '--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.' ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='Where to save the converted model.' ) parser.add_argument( '--tokenizer_file', default=None, type=str, help='Path to the tokenizer file to use (if not provided, only the model is converted).', ) parser.add_argument( '--size', default=None, type=str, help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Push to the Hub the converted model.', ) parser.add_argument( '--model_name', default=None, type=str, help='Name of the pushed model on the Hub, including the username / organization.', ) lowercase : Union[str, Any] = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
3
1
'''simple docstring''' from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=__snake_case ) class A ( __snake_case ): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization __magic_name__ = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) __magic_name__ = Features({'''text''': Value('''string''' )} ) __magic_name__ = Features({'''summary''': Value('''string''' )} ) __magic_name__ = "text" __magic_name__ = "summary" @property def __lowerCAmelCase ( self ) -> Dict[str, str]: """simple docstring""" return {self.text_column: "text", self.summary_column: "summary"}
3
'''simple docstring''' from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING lowercase : str = logging.get_logger(__name__) @add_end_docstrings(__snake_case ) class A ( __snake_case ): def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) self.check_model_type(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A, A : Dict = {}, {} if padding is not None: A : List[str] = padding if truncation is not None: A : Dict = truncation if top_k is not None: A : Optional[Any] = top_k return preprocess_params, {}, postprocess_params def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE , (Image.Image, str) ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : int = {'''image''': image, '''question''': question} else: A : Any = image A : Any = super().__call__(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) return results def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" A : Union[str, Any] = load_image(inputs['''image'''] ) A : Optional[Any] = self.tokenizer( inputs['''question'''] , return_tensors=self.framework , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE ) A : Dict = self.image_processor(images=SCREAMING_SNAKE_CASE , return_tensors=self.framework ) model_inputs.update(SCREAMING_SNAKE_CASE ) return model_inputs def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : List[Any] = self.model(**SCREAMING_SNAKE_CASE ) return model_outputs def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=5 ) -> int: """simple docstring""" if top_k > self.model.config.num_labels: A : Dict = self.model.config.num_labels if self.framework == "pt": A : Optional[int] = model_outputs.logits.sigmoid()[0] A, A : int = probs.topk(SCREAMING_SNAKE_CASE ) else: raise ValueError(F'Unsupported framework: {self.framework}' ) A : int = scores.tolist() A : List[str] = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )]
3
1
'''simple docstring''' import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] ) @pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , '''IN_MEMORY_MAX_SIZE''' , snake_case__ ) A : Dict = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: A : Dict = dataset_size < in_memory_max_size else: A : Tuple = False A : int = is_small_dataset(snake_case__ ) assert result == expected
3
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase : Union[str, Any] = logging.get_logger(__name__) lowercase : str = { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json', 'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json', 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json' ), 'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json', 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json', 'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json', 'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json', 'cl-tohoku/bert-base-japanese-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json' ), 'cl-tohoku/bert-base-japanese-char-whole-word-masking': ( 'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json' ), 'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json', # See all BERT models at https://huggingface.co/models?filter=bert } class A ( __snake_case ): __magic_name__ = '''bert''' def __init__( self , SCREAMING_SNAKE_CASE=30522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Optional[int]: """simple docstring""" super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = vocab_size A : Optional[Any] = hidden_size A : List[Any] = num_hidden_layers A : List[str] = num_attention_heads A : Dict = hidden_act A : Optional[Any] = intermediate_size A : List[Any] = hidden_dropout_prob A : List[Any] = attention_probs_dropout_prob A : Optional[Any] = max_position_embeddings A : List[str] = type_vocab_size A : Dict = initializer_range A : str = layer_norm_eps A : int = position_embedding_type A : Dict = use_cache A : str = classifier_dropout class A ( __snake_case ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": A : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: A : Optional[int] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
3
1
'''simple docstring''' def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : List[Any] = 0 A : Any = len(snake_case__ ) for i in range(n - 1 ): for j in range(i + 1 , snake_case__ ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if len(snake_case__ ) <= 1: return arr, 0 A : Any = len(snake_case__ ) // 2 A : List[Any] = arr[0:mid] A : Union[str, Any] = arr[mid:] A, A : List[str] = count_inversions_recursive(snake_case__ ) A, A : Dict = count_inversions_recursive(snake_case__ ) A, A : Dict = _count_cross_inversions(snake_case__ , snake_case__ ) A : Optional[int] = inversion_p + inversions_q + cross_inversions return c, num_inversions def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Tuple = [] A : Optional[Any] = 0 while i < len(snake_case__ ) and j < len(snake_case__ ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(snake_case__ ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(snake_case__ ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def lowerCAmelCase_ ( ): '''simple docstring''' A : Tuple = [10, 2, 1, 5, 5, 2, 11] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) A : List[Any] = count_inversions_bf(snake_case__ ) A, A : int = count_inversions_recursive(snake_case__ ) assert num_inversions_bf == num_inversions_recursive == 8 print('''number of inversions = ''' , snake_case__ ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() A : Tuple = count_inversions_bf(snake_case__ ) A, A : Optional[int] = count_inversions_recursive(snake_case__ ) assert num_inversions_bf == num_inversions_recursive == 0 print('''number of inversions = ''' , snake_case__ ) # an empty list should also have zero inversions A : int = [] A : List[Any] = count_inversions_bf(snake_case__ ) A, A : Optional[int] = count_inversions_recursive(snake_case__ ) assert num_inversions_bf == num_inversions_recursive == 0 print('''number of inversions = ''' , snake_case__ ) if __name__ == "__main__": main()
3
'''simple docstring''' import requests from bsa import BeautifulSoup def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : str = BeautifulSoup(requests.get(snake_case__ , params=snake_case__ ).content , '''html.parser''' ) A : Dict = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} ) A : Optional[int] = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' ) return anchors[2].get_text() if __name__ == "__main__": lowercase : str = { 'title': ( 'Precisely geometry controlled microsupercapacitors for ultrahigh areal ' 'capacitance, volumetric capacitance, and energy density' ), 'journal': 'Chem. Mater.', 'volume': 30, 'pages': '3979-3990', 'year': 20_18, 'hl': 'en', } print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
3
1
'''simple docstring''' from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class A : __magic_name__ = 42 __magic_name__ = None __magic_name__ = None lowercase : Union[str, Any] = namedtuple('CoinsDistribResult', 'moves excess') def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if root is None: return 0 # Validation def count_nodes(snake_case__ ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(snake_case__ ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(snake_case__ ) != count_coins(snake_case__ ): raise ValueError('''The nodes number should be same as the number of coins''' ) # Main calculation def get_distrib(snake_case__ ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) A, A : Optional[int] = get_distrib(node.left ) A, A : List[str] = get_distrib(node.right ) A : List[Any] = 1 - left_distrib_excess A : int = 1 - right_distrib_excess A : Union[str, Any] = ( left_distrib_moves + right_distrib_moves + abs(snake_case__ ) + abs(snake_case__ ) ) A : Optional[int] = node.data - coins_to_left - coins_to_right return CoinsDistribResult(snake_case__ , snake_case__ ) return get_distrib(snake_case__ )[0] if __name__ == "__main__": import doctest doctest.testmod()
3
'''simple docstring''' class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Any = None A : Optional[Any] = None A : Tuple = graph self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Dict = len(SCREAMING_SNAKE_CASE ) A : Optional[Any] = None def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" if sources is int: A : Dict = [sources] if sinks is int: A : str = [sinks] if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0: return A : Optional[int] = sources[0] A : Union[str, Any] = sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1: A : Optional[int] = 0 for i in sources: max_input_flow += sum(self.graph[i] ) A : Dict = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: A : Dict = max_input_flow A : Tuple = 0 A : Tuple = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: A : Optional[Any] = max_input_flow A : Optional[Any] = size - 1 def __lowerCAmelCase ( self ) -> Any: """simple docstring""" if self.maximum_flow_algorithm is None: raise Exception('''You need to set maximum flow algorithm before.''' ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : List[Any] = algorithm(self ) class A : def __init__( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Union[str, Any] = flow_network A : Optional[Any] = flow_network.verticesCount A : Tuple = flow_network.sourceIndex A : Dict = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that A : str = flow_network.graph A : Optional[Any] = False def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" if not self.executed: self._algorithm() A : Optional[int] = True def __lowerCAmelCase ( self ) -> Any: """simple docstring""" pass class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE ) # use this to save your result A : List[str] = -1 def __lowerCAmelCase ( self ) -> str: """simple docstring""" if not self.executed: raise Exception('''You should execute algorithm before using its result!''' ) return self.maximum_flow class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE ) A : Optional[Any] = [[0] * self.verticies_count for i in range(self.verticies_count )] A : Union[str, Any] = [0] * self.verticies_count A : List[Any] = [0] * self.verticies_count def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Tuple = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule A : Optional[Any] = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list A : Union[str, Any] = 0 while i < len(SCREAMING_SNAKE_CASE ): A : str = vertices_list[i] A : List[str] = self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) ) A : int = 0 else: i += 1 A : Optional[Any] = sum(self.preflow[self.source_index] ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.relabel(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Dict = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : Dict = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): A : Dict = self.heights[to_index] if min_height is not None: A : Dict = min_height + 1 if __name__ == "__main__": lowercase : Optional[int] = [0] lowercase : List[Any] = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] lowercase : int = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network lowercase : List[str] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate lowercase : List[str] = flow_network.find_maximum_flow() print(f'''maximum flow is {maximum_flow}''')
3
1
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: lowercase : Optional[Any] = None lowercase : Tuple = logging.get_logger(__name__) lowercase : Dict = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'} lowercase : Tuple = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, 'tokenizer_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json', }, } lowercase : List[str] = { 'google/rembert': 2_56, } lowercase : Dict = '▁' class A ( __snake_case ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = RemBertTokenizer def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[MASK]" , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" A : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token super().__init__( SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) A : List[Any] = do_lower_case A : str = remove_space A : int = keep_accents A : Union[str, Any] = vocab_file A : List[Any] = False if not self.vocab_file else True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : List[Any] = [self.sep_token_id] A : Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : Tuple = [self.sep_token_id] A : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error('''Vocabulary path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE ) ) return A : Any = os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
3
'''simple docstring''' def lowerCAmelCase_ ( snake_case__ = 10 ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ) or n < 0: raise ValueError('''Invalid input''' ) A : List[str] = 10**n A : Tuple = 2_8433 * (pow(2 , 783_0457 , snake_case__ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(f'''{solution(10) = }''')
3
1
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A ( __snake_case , unittest.TestCase ): __magic_name__ = KandinskyVaaImgaImgPipeline __magic_name__ = ['''image_embeds''', '''negative_image_embeds''', '''image'''] __magic_name__ = [ '''image_embeds''', '''negative_image_embeds''', '''image''', ] __magic_name__ = [ '''generator''', '''height''', '''width''', '''strength''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] __magic_name__ = False @property def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" return 32 @property def __lowerCAmelCase ( self ) -> Any: """simple docstring""" return 32 @property def __lowerCAmelCase ( self ) -> str: """simple docstring""" return self.time_input_dim @property def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" return self.time_input_dim * 4 @property def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" return 100 @property def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" torch.manual_seed(0 ) A : Union[str, Any] = { '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } A : Any = UNetaDConditionModel(**SCREAMING_SNAKE_CASE ) return model @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" torch.manual_seed(0 ) A : Optional[Any] = VQModel(**self.dummy_movq_kwargs ) return model def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Optional[int] = self.dummy_unet A : List[Any] = self.dummy_movq A : Optional[Any] = { '''num_train_timesteps''': 1000, '''beta_schedule''': '''linear''', '''beta_start''': 0.00_085, '''beta_end''': 0.012, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } A : List[Any] = DDIMScheduler(**SCREAMING_SNAKE_CASE ) A : Union[str, Any] = { '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0 ) -> str: """simple docstring""" A : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(SCREAMING_SNAKE_CASE ) ).to(SCREAMING_SNAKE_CASE ) A : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( SCREAMING_SNAKE_CASE ) # create init_image A : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE ) ).to(SCREAMING_SNAKE_CASE ) A : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] A : Union[str, Any] = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE ) ).convert('''RGB''' ).resize((256, 256) ) if str(SCREAMING_SNAKE_CASE ).startswith('''mps''' ): A : int = torch.manual_seed(SCREAMING_SNAKE_CASE ) else: A : Tuple = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE ) A : Tuple = { '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Optional[Any] = '''cpu''' A : Tuple = self.get_dummy_components() A : List[str] = self.pipeline_class(**SCREAMING_SNAKE_CASE ) A : Any = pipe.to(SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE ) A : Optional[Any] = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE ) ) A : List[Any] = output.images A : Union[str, Any] = pipe( **self.get_dummy_inputs(SCREAMING_SNAKE_CASE ) , return_dict=SCREAMING_SNAKE_CASE , )[0] A : List[str] = image[0, -3:, -3:, -1] A : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A : Optional[int] = np.array( [0.6_199_778, 0.63_984_406, 0.46_145_785, 0.62_944_984, 0.5_622_215, 0.47_306_132, 0.47_441_456, 0.4_607_606, 0.48_719_263] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' @slow @require_torch_gpu class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : int = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_img2img_frog.npy''' ) A : Dict = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) A : int = '''A red cartoon frog, 4k''' A : Tuple = KandinskyVaaPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(SCREAMING_SNAKE_CASE ) A : Optional[Any] = KandinskyVaaImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa ) A : Dict = pipeline.to(SCREAMING_SNAKE_CASE ) pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE ) A : str = torch.Generator(device='''cpu''' ).manual_seed(0 ) A, A : List[str] = pipe_prior( SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() A : Optional[int] = pipeline( image=SCREAMING_SNAKE_CASE , image_embeds=SCREAMING_SNAKE_CASE , negative_image_embeds=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , ) A : List[Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
3
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging lowercase : List[str] = logging.get_logger(__name__) lowercase : str = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class A ( __snake_case ): __magic_name__ = '''gpt_neo''' __magic_name__ = ['''past_key_values'''] __magic_name__ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self , SCREAMING_SNAKE_CASE=50257 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=24 , SCREAMING_SNAKE_CASE=[[["global", "local"], 12]] , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=50256 , SCREAMING_SNAKE_CASE=50256 , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" A : Union[str, Any] = vocab_size A : Optional[Any] = max_position_embeddings A : Dict = hidden_size A : Optional[Any] = num_layers A : Tuple = num_heads A : int = intermediate_size A : Optional[Any] = window_size A : List[Any] = activation_function A : Union[str, Any] = resid_dropout A : Any = embed_dropout A : List[Any] = attention_dropout A : str = classifier_dropout A : List[Any] = layer_norm_epsilon A : str = initializer_range A : List[str] = use_cache A : Optional[int] = bos_token_id A : List[Any] = eos_token_id A : int = attention_types A : int = self.expand_attention_types_params(SCREAMING_SNAKE_CASE ) if len(self.attention_layers ) != self.num_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.attention_layers)` == `config.num_layers` ''' F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, ' F'`config.num_layers = {self.num_layers}`. ' '''`config.attention_layers` is prepared using `config.attention_types`. ''' '''Please verify the value of `config.attention_types` argument.''' ) super().__init__(bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : List[str] = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' import torch A : Tuple = input.size() A : Union[str, Any] = len(snake_case__ ) A : List[str] = shape[dimension] A : Union[str, Any] = torch.arange(0 , snake_case__ , snake_case__ ) A : List[str] = torch.div(sizedim - size , snake_case__ , rounding_mode='''floor''' ) + 1 A : Optional[int] = torch.arange(snake_case__ ) + low_indices[:min_length][:, None] A : str = [slice(snake_case__ )] * rank A : List[Any] = indices A : Union[str, Any] = input[s] A : List[str] = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import torch A : List[str] = torch.arange(1 , snake_case__ ) A : Optional[int] = torch.remainder(snake_case__ , snake_case__ ) A : Optional[int] = remainders == 0 A : Optional[Any] = candidates[divisor_indices] A : Optional[int] = torch.max(snake_case__ ) return largest_divisor, torch.div(snake_case__ , snake_case__ , rounding_mode='''floor''' ) class A ( __snake_case ): @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" A : Tuple = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE , direction='''inputs''' ) A : Optional[Any] = {0: '''batch''', 1: '''past_sequence + sequence'''} else: A : Dict = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return self._config.num_heads def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]: """simple docstring""" A : List[str] = super(SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs( SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE ) # We need to order the input in the way they appears in the forward() A : Any = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch A, A : Dict = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values A : str = seqlen + 2 A : List[Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) A : Any = [ (torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers ) ] A : str = common_inputs['''attention_mask'''] if self.use_past: A : Optional[int] = ordered_inputs['''attention_mask'''].dtype A : List[str] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )] , dim=1 ) return ordered_inputs @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" return 13
3
1
'''simple docstring''' def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' return int((input_a, input_a).count(0 ) == 0 ) def lowerCAmelCase_ ( ): '''simple docstring''' assert and_gate(0 , 0 ) == 0 assert and_gate(0 , 1 ) == 0 assert and_gate(1 , 0 ) == 0 assert and_gate(1 , 1 ) == 1 if __name__ == "__main__": test_and_gate() print(and_gate(1, 0)) print(and_gate(0, 0)) print(and_gate(0, 1)) print(and_gate(1, 1))
3
'''simple docstring''' import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Union[str, Any] = [] A : Union[str, Any] = [] for i in range(self.num_layers ): A : Any = self.in_channels if i == 0 else self.out_channels A : Optional[Any] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Optional[int] = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnets A : Union[str, Any] = attentions if self.add_downsample: A : int = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Union[str, Any]: """simple docstring""" A : Optional[Any] = () for resnet, attn in zip(self.resnets , self.attentions ): A : int = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Dict = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[Any] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Optional[Any] = [] for i in range(self.num_layers ): A : Optional[Any] = self.in_channels if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets if self.add_downsample: A : Dict = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]: """simple docstring""" A : str = () for resnet in self.resnets: A : Optional[int] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) if self.add_downsample: A : Optional[int] = self.downsamplers_a(SCREAMING_SNAKE_CASE ) output_states += (hidden_states,) return hidden_states, output_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[Any] = [] A : Optional[int] = [] for i in range(self.num_layers ): A : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : Dict = self.prev_output_channel if i == 0 else self.out_channels A : List[str] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : int = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Dict = resnets A : Optional[Any] = attentions if self.add_upsample: A : Optional[int] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Optional[int]: """simple docstring""" for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states A : List[str] = res_hidden_states_tuple[-1] A : int = res_hidden_states_tuple[:-1] A : List[str] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Tuple = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : Dict = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = True __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : int = [] for i in range(self.num_layers ): A : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels A : List[str] = self.prev_output_channel if i == 0 else self.out_channels A : str = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[Any] = resnets if self.add_upsample: A : Optional[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Tuple: """simple docstring""" for resnet in self.resnets: # pop res hidden states A : Optional[int] = res_hidden_states_tuple[-1] A : Optional[Any] = res_hidden_states_tuple[:-1] A : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) A : Optional[Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) if self.add_upsample: A : List[str] = self.upsamplers_a(SCREAMING_SNAKE_CASE ) return hidden_states class A ( nn.Module ): __magic_name__ = 42 __magic_name__ = 0.0 __magic_name__ = 1 __magic_name__ = 1 __magic_name__ = False __magic_name__ = False __magic_name__ = jnp.floataa def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : str = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] A : List[Any] = [] for _ in range(self.num_layers ): A : int = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(SCREAMING_SNAKE_CASE ) A : List[str] = resnets A : List[str] = attentions def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Dict: """simple docstring""" A : Optional[Any] = self.resnets[0](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): A : Optional[int] = attn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = resnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , deterministic=SCREAMING_SNAKE_CASE ) return hidden_states
3
1
'''simple docstring''' import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamImageProcessor, SamProcessor if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_vision @require_torchvision class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Optional[Any] = tempfile.mkdtemp() A : List[str] = SamImageProcessor() A : Dict = SamProcessor(SCREAMING_SNAKE_CASE ) processor.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ).image_processor def __lowerCAmelCase ( self ) -> Any: """simple docstring""" shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A : List[str] = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs] return image_inputs def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Optional[int] = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A : Any = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 ) A : Optional[Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Optional[int] = self.get_image_processor() A : List[Any] = SamProcessor(image_processor=SCREAMING_SNAKE_CASE ) A : List[Any] = self.prepare_image_inputs() A : List[Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors='''np''' ) A : Tuple = processor(images=SCREAMING_SNAKE_CASE , return_tensors='''np''' ) input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor input_feat_extract.pop('''reshaped_input_sizes''' ) # pop original_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) @require_torch def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Optional[int] = self.get_image_processor() A : List[Any] = SamProcessor(image_processor=SCREAMING_SNAKE_CASE ) A : Tuple = [torch.ones((1, 3, 5, 5) )] A : int = [[1764, 2646]] A : Tuple = [[683, 1024]] A : Optional[int] = processor.post_process_masks(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) A : List[str] = processor.post_process_masks( SCREAMING_SNAKE_CASE , torch.tensor(SCREAMING_SNAKE_CASE ) , torch.tensor(SCREAMING_SNAKE_CASE ) ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) # should also work with np A : Union[str, Any] = [np.ones((1, 3, 5, 5) )] A : str = processor.post_process_masks(SCREAMING_SNAKE_CASE , np.array(SCREAMING_SNAKE_CASE ) , np.array(SCREAMING_SNAKE_CASE ) ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) A : Tuple = [[1, 0], [0, 1]] with self.assertRaises(SCREAMING_SNAKE_CASE ): A : Optional[Any] = processor.post_process_masks(SCREAMING_SNAKE_CASE , np.array(SCREAMING_SNAKE_CASE ) , np.array(SCREAMING_SNAKE_CASE ) ) @require_vision @require_tf class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Any = tempfile.mkdtemp() A : Any = SamImageProcessor() A : str = SamProcessor(SCREAMING_SNAKE_CASE ) processor.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ).image_processor def __lowerCAmelCase ( self ) -> int: """simple docstring""" shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A : List[Any] = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs] return image_inputs def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Optional[Any] = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A : List[str] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 ) A : Any = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Union[str, Any] = self.get_image_processor() A : Dict = SamProcessor(image_processor=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = self.prepare_image_inputs() A : Tuple = image_processor(SCREAMING_SNAKE_CASE , return_tensors='''np''' ) A : Optional[Any] = processor(images=SCREAMING_SNAKE_CASE , return_tensors='''np''' ) input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor input_feat_extract.pop('''reshaped_input_sizes''' ) # pop reshaped_input_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) @require_tf def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : str = self.get_image_processor() A : Union[str, Any] = SamProcessor(image_processor=SCREAMING_SNAKE_CASE ) A : Dict = [tf.ones((1, 3, 5, 5) )] A : List[str] = [[1764, 2646]] A : List[Any] = [[683, 1024]] A : Any = processor.post_process_masks(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_tensors='''tf''' ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) A : Any = processor.post_process_masks( SCREAMING_SNAKE_CASE , tf.convert_to_tensor(SCREAMING_SNAKE_CASE ) , tf.convert_to_tensor(SCREAMING_SNAKE_CASE ) , return_tensors='''tf''' , ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) # should also work with np A : int = [np.ones((1, 3, 5, 5) )] A : int = processor.post_process_masks( SCREAMING_SNAKE_CASE , np.array(SCREAMING_SNAKE_CASE ) , np.array(SCREAMING_SNAKE_CASE ) , return_tensors='''tf''' ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) A : Any = [[1, 0], [0, 1]] with self.assertRaises(tf.errors.InvalidArgumentError ): A : Dict = processor.post_process_masks( SCREAMING_SNAKE_CASE , np.array(SCREAMING_SNAKE_CASE ) , np.array(SCREAMING_SNAKE_CASE ) , return_tensors='''tf''' ) @require_vision @require_torchvision class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Any = tempfile.mkdtemp() A : Any = SamImageProcessor() A : Optional[Any] = SamProcessor(SCREAMING_SNAKE_CASE ) processor.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ).image_processor def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A : Optional[Any] = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs] return image_inputs @is_pt_tf_cross_test def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Tuple = self.get_image_processor() A : Union[str, Any] = SamProcessor(image_processor=SCREAMING_SNAKE_CASE ) A : Optional[Any] = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa ) A : List[str] = [tf.convert_to_tensor(SCREAMING_SNAKE_CASE )] A : Tuple = [torch.tensor(SCREAMING_SNAKE_CASE )] A : Dict = [[1764, 2646]] A : int = [[683, 1024]] A : List[Any] = processor.post_process_masks( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_tensors='''tf''' ) A : Optional[int] = processor.post_process_masks( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) ) @is_pt_tf_cross_test def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Dict = self.get_image_processor() A : Union[str, Any] = SamProcessor(image_processor=SCREAMING_SNAKE_CASE ) A : Dict = self.prepare_image_inputs() A : List[str] = image_processor(SCREAMING_SNAKE_CASE , return_tensors='''pt''' )['''pixel_values'''].numpy() A : str = processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' )['''pixel_values'''].numpy() A : Tuple = image_processor(SCREAMING_SNAKE_CASE , return_tensors='''tf''' )['''pixel_values'''].numpy() A : Any = processor(images=SCREAMING_SNAKE_CASE , return_tensors='''tf''' )['''pixel_values'''].numpy() self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
3
'''simple docstring''' import os def lowerCAmelCase_ ( ): '''simple docstring''' A : List[Any] = os.path.join(os.path.dirname(snake_case__ ) , '''num.txt''' ) with open(snake_case__ ) as file_hand: return str(sum(int(snake_case__ ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
3
1
'''simple docstring''' import inspect import unittest from transformers import RegNetConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import RegNetForImageClassification, RegNetModel from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE="relu" , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=None , ) -> Dict: """simple docstring""" A : List[str] = parent A : int = batch_size A : Optional[int] = image_size A : int = num_channels A : List[Any] = embeddings_size A : str = hidden_sizes A : Any = depths A : List[str] = is_training A : Any = use_labels A : Any = hidden_act A : Tuple = num_labels A : List[str] = scope A : Union[str, Any] = len(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Optional[Any] = None if self.use_labels: A : str = ids_tensor([self.batch_size] , self.num_labels ) A : List[Any] = self.get_config() return config, pixel_values, labels def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : int = RegNetModel(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : List[str] = model(SCREAMING_SNAKE_CASE ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : str = self.num_labels A : List[Any] = RegNetForImageClassification(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Optional[Any] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Tuple = self.prepare_config_and_inputs() A, A, A : Optional[int] = config_and_inputs A : Union[str, Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class A ( __snake_case , __snake_case , unittest.TestCase ): __magic_name__ = (RegNetModel, RegNetForImageClassification) if is_torch_available() else () __magic_name__ = ( {'''feature-extraction''': RegNetModel, '''image-classification''': RegNetForImageClassification} if is_torch_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Any = RegNetModelTester(self ) A : List[str] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" return @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" pass def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A, A : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Optional[int] = model_class(SCREAMING_SNAKE_CASE ) A : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Dict = [*signature.parameters.keys()] A : Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A, A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Tuple = model_class(config=SCREAMING_SNAKE_CASE ) for name, module in model.named_modules(): if isinstance(SCREAMING_SNAKE_CASE , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" def check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : Dict = model_class(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): A : Any = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) A : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A : Any = self.model_tester.num_stages self.assertEqual(len(SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) A, A : str = self.model_tester.prepare_config_and_inputs_for_common() A : Dict = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: A : Dict = layer_type A : Union[str, Any] = True check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A : Dict = True check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Dict = RegNetModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) def lowerCAmelCase_ ( ): '''simple docstring''' A : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class A ( unittest.TestCase ): @cached_property def __lowerCAmelCase ( self ) -> str: """simple docstring""" return ( AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Optional[Any] = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(SCREAMING_SNAKE_CASE ) A : Dict = self.default_image_processor A : int = prepare_img() A : Optional[Any] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): A : List[str] = model(**SCREAMING_SNAKE_CASE ) # verify the logits A : Dict = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE ) A : Tuple = torch.tensor([-0.4_180, -1.5_051, -3.4_836] ).to(SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
3
'''simple docstring''' import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] ) @pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , '''IN_MEMORY_MAX_SIZE''' , snake_case__ ) A : Dict = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: A : Dict = dataset_size < in_memory_max_size else: A : Tuple = False A : int = is_small_dataset(snake_case__ ) assert result == expected
3
1
'''simple docstring''' import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=36 , SCREAMING_SNAKE_CASE=6 , SCREAMING_SNAKE_CASE=6 , SCREAMING_SNAKE_CASE=6 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , ) -> Dict: """simple docstring""" A : Optional[int] = parent A : Optional[int] = batch_size A : Union[str, Any] = seq_length A : Union[str, Any] = is_training A : Optional[Any] = use_input_mask A : Tuple = use_token_type_ids A : Optional[int] = use_labels A : Tuple = vocab_size A : Optional[Any] = embedding_size A : str = hidden_size A : Dict = num_hidden_layers A : str = num_hidden_groups A : Dict = num_attention_heads A : List[Any] = intermediate_size A : List[str] = hidden_act A : Optional[Any] = hidden_dropout_prob A : Any = attention_probs_dropout_prob A : Any = max_position_embeddings A : Any = type_vocab_size A : Dict = type_sequence_label_size A : Optional[int] = initializer_range A : List[Any] = num_labels A : int = num_choices A : Optional[int] = scope def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : str = None if self.use_input_mask: A : int = random_attention_mask([self.batch_size, self.seq_length] ) A : Tuple = None if self.use_token_type_ids: A : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A : Tuple = None A : List[Any] = None A : List[Any] = None if self.use_labels: A : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A : Any = ids_tensor([self.batch_size] , self.num_choices ) A : Tuple = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : Any = AlbertModel(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Dict = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE ) A : str = model(SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE ) A : Optional[int] = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" A : int = AlbertForPreTraining(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Optional[Any] = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , sentence_order_label=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" A : Any = AlbertForMaskedLM(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Tuple = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Any = AlbertForQuestionAnswering(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : str = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , start_positions=SCREAMING_SNAKE_CASE , end_positions=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : List[Any] = self.num_labels A : str = AlbertForSequenceClassification(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : int = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Optional[Any] = self.num_labels A : str = AlbertForTokenClassification(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Union[str, Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : str = self.num_choices A : List[str] = AlbertForMultipleChoice(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A : List[str] = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Union[str, Any] = self.prepare_config_and_inputs() ( ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ) : str = config_and_inputs A : Union[str, Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class A ( __snake_case , __snake_case , unittest.TestCase ): __magic_name__ = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) __magic_name__ = ( { '''feature-extraction''': AlbertModel, '''fill-mask''': AlbertForMaskedLM, '''question-answering''': AlbertForQuestionAnswering, '''text-classification''': AlbertForSequenceClassification, '''token-classification''': AlbertForTokenClassification, '''zero-shot''': AlbertForSequenceClassification, } if is_torch_available() else {} ) __magic_name__ = True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Tuple: """simple docstring""" A : Dict = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE ) if return_labels: if model_class in get_values(SCREAMING_SNAKE_CASE ): A : Any = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE ) A : str = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE ) return inputs_dict def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Any = AlbertModelTester(self ) A : int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A : str = type self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : int = AlbertModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) @require_torch class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Optional[Any] = AlbertModel.from_pretrained('''albert-base-v2''' ) A : Any = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) A : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): A : Optional[int] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )[0] A : str = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE ) A : Optional[int] = torch.tensor( [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
3
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class A ( __snake_case ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
3
1
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase : Optional[int] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''input_features''', '''attention_mask'''] def __init__( self , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=16000 , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" super().__init__(feature_size=SCREAMING_SNAKE_CASE , sampling_rate=SCREAMING_SNAKE_CASE , padding_value=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = num_mel_bins A : Tuple = do_ceptral_normalize A : Dict = normalize_means A : List[Any] = normalize_vars A : List[str] = True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : List[Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers A : Any = torch.from_numpy(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) A : Any = ta_kaldi.fbank(SCREAMING_SNAKE_CASE , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 0.0 , ) -> np.ndarray: """simple docstring""" if normalize_means: A : Dict = x[:input_length].mean(axis=0 ) A : Optional[Any] = np.subtract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if normalize_vars: A : str = x[:input_length].std(axis=0 ) A : int = np.divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if input_length < x.shape[0]: A : List[str] = padding_value # make sure array is in float32 A : Tuple = x.astype(np.floataa ) return x def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[np.ndarray]: """simple docstring""" A : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' F' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) A : List[Any] = isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}' ) A : Tuple = is_batched_numpy or ( isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ): A : Union[str, Any] = np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A : Any = [raw_speech] # extract fbank features A : List[str] = [self._extract_fbank_features(SCREAMING_SNAKE_CASE ) for waveform in raw_speech] # convert into correct format for padding A : str = BatchFeature({'''input_features''': features} ) A : Union[str, Any] = self.pad( SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) # make sure list is in array format A : List[str] = padded_inputs.get('''input_features''' ) if isinstance(input_features[0] , SCREAMING_SNAKE_CASE ): A : str = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features] A : Union[str, Any] = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: A : Dict = ( np.array(SCREAMING_SNAKE_CASE , dtype=np.intaa ) if self._get_padding_strategies(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE ) is not PaddingStrategy.DO_NOT_PAD else None ) A : List[Any] = self.normalize( padded_inputs['''input_features'''] , attention_mask=SCREAMING_SNAKE_CASE ) if return_tensors is not None: A : int = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE ) return padded_inputs
3
'''simple docstring''' from scipy.stats import pearsonr import datasets lowercase : Optional[int] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n' lowercase : Optional[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n' lowercase : str = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: """simple docstring""" if return_pvalue: A : Union[str, Any] = pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )[0] )}
3
1
'''simple docstring''' import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class A ( __snake_case ): __magic_name__ = ['''image_processor''', '''tokenizer'''] __magic_name__ = '''BlipImageProcessor''' __magic_name__ = '''AutoTokenizer''' def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" super().__init__(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # add QFormer tokenizer A : Tuple = qformer_tokenizer def __call__( self , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> BatchFeature: """simple docstring""" if images is None and text is None: raise ValueError('''You have to specify at least images or text.''' ) A : Tuple = BatchFeature() if text is not None: A : Tuple = self.tokenizer( text=SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , stride=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , return_overflowing_tokens=SCREAMING_SNAKE_CASE , return_special_tokens_mask=SCREAMING_SNAKE_CASE , return_offsets_mapping=SCREAMING_SNAKE_CASE , return_token_type_ids=SCREAMING_SNAKE_CASE , return_length=SCREAMING_SNAKE_CASE , verbose=SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) encoding.update(SCREAMING_SNAKE_CASE ) A : List[str] = self.qformer_tokenizer( text=SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , stride=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , return_overflowing_tokens=SCREAMING_SNAKE_CASE , return_special_tokens_mask=SCREAMING_SNAKE_CASE , return_offsets_mapping=SCREAMING_SNAKE_CASE , return_token_type_ids=SCREAMING_SNAKE_CASE , return_length=SCREAMING_SNAKE_CASE , verbose=SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) A : Optional[Any] = qformer_text_encoding.pop('''input_ids''' ) A : List[str] = qformer_text_encoding.pop('''attention_mask''' ) if images is not None: A : List[Any] = self.image_processor(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE ) encoding.update(SCREAMING_SNAKE_CASE ) return encoding def __lowerCAmelCase ( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" return self.tokenizer.decode(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Union[str, Any] = self.tokenizer.model_input_names A : Optional[int] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" if os.path.isfile(SCREAMING_SNAKE_CASE ): raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file' ) os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE ) A : int = os.path.join(SCREAMING_SNAKE_CASE , '''qformer_tokenizer''' ) self.qformer_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE ) return super().save_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) @classmethod def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : int = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , subfolder='''qformer_tokenizer''' ) A : Union[str, Any] = cls._get_arguments_from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) args.append(SCREAMING_SNAKE_CASE ) return cls(*SCREAMING_SNAKE_CASE )
3
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) lowercase : Dict = { 'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'], 'processing_speech_to_text': ['Speech2TextProcessor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[Any] = ['Speech2TextTokenizer'] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : List[str] = ['Speech2TextFeatureExtractor'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Dict = [ 'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFSpeech2TextForConditionalGeneration', 'TFSpeech2TextModel', 'TFSpeech2TextPreTrainedModel', ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Any = [ 'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Speech2TextForConditionalGeneration', 'Speech2TextModel', 'Speech2TextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys lowercase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
3
1
'''simple docstring''' from __future__ import annotations from cmath import sqrt def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' if a == 0: raise ValueError('''Coefficient \'a\' must not be zero.''' ) A : List[str] = b * b - 4 * a * c A : Optional[Any] = (-b + sqrt(snake_case__ )) / (2 * a) A : int = (-b - sqrt(snake_case__ )) / (2 * a) return ( root_a.real if not root_a.imag else root_a, root_a.real if not root_a.imag else root_a, ) def lowerCAmelCase_ ( ): '''simple docstring''' A, A : int = quadratic_roots(a=5 , b=6 , c=1 ) print(F'The solutions are: {solutiona} and {solutiona}' ) if __name__ == "__main__": main()
3
'''simple docstring''' import os import sys import unittest lowercase : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) lowercase : Any = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py') lowercase : Optional[int] = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py') class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Any = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : List[Any] = {'''BertModelTest''': '''BertModelTester'''} A : int = { '''BlipModelTest''': '''BlipModelTester''', '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''', '''BlipTextModelTest''': '''BlipTextModelTester''', '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''', '''BlipVQAModelTest''': '''BlipVQAModelTester''', '''BlipVisionModelTest''': '''BlipVisionModelTester''', } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : Tuple = get_model_to_test_mapping(SCREAMING_SNAKE_CASE ) A : List[str] = { '''BertForMaskedLM''': ['''BertModelTest'''], '''BertForMultipleChoice''': ['''BertModelTest'''], '''BertForNextSentencePrediction''': ['''BertModelTest'''], '''BertForPreTraining''': ['''BertModelTest'''], '''BertForQuestionAnswering''': ['''BertModelTest'''], '''BertForSequenceClassification''': ['''BertModelTest'''], '''BertForTokenClassification''': ['''BertModelTest'''], '''BertLMHeadModel''': ['''BertModelTest'''], '''BertModel''': ['''BertModelTest'''], } A : Union[str, Any] = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''], '''BlipModel''': ['''BlipModelTest'''], '''BlipTextModel''': ['''BlipTextModelTest'''], '''BlipVisionModel''': ['''BlipVisionModelTest'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : int = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE ) A : Dict = { '''BertForMaskedLM''': ['''BertModelTester'''], '''BertForMultipleChoice''': ['''BertModelTester'''], '''BertForNextSentencePrediction''': ['''BertModelTester'''], '''BertForPreTraining''': ['''BertModelTester'''], '''BertForQuestionAnswering''': ['''BertModelTester'''], '''BertForSequenceClassification''': ['''BertModelTester'''], '''BertForTokenClassification''': ['''BertModelTester'''], '''BertLMHeadModel''': ['''BertModelTester'''], '''BertModel''': ['''BertModelTester'''], } A : str = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''], '''BlipModel''': ['''BlipModelTester'''], '''BlipTextModel''': ['''BlipTextModelTester'''], '''BlipVisionModel''': ['''BlipVisionModelTester'''], } self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
3
1
'''simple docstring''' import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint lowercase : Optional[int] = { '169M': 12, '430M': 24, '1B5': 24, '3B': 32, '7B': 32, '14B': 40, } lowercase : Optional[Any] = { '169M': 7_68, '430M': 10_24, '1B5': 20_48, '3B': 25_60, '7B': 40_96, '14B': 51_20, } def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Optional[Any] = list(state_dict.keys() ) for name in state_dict_keys: A : str = state_dict.pop(snake_case__ ) # emb -> embedding if name.startswith('''emb.''' ): A : Optional[Any] = name.replace('''emb.''' , '''embeddings.''' ) # ln_0 -> pre_ln (only present at block 0) if name.startswith('''blocks.0.ln0''' ): A : Union[str, Any] = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' ) # att -> attention A : int = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , snake_case__ ) # ffn -> feed_forward A : List[Any] = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , snake_case__ ) # time_mix_k -> time_mix_key and reshape if name.endswith('''.time_mix_k''' ): A : List[str] = name.replace('''.time_mix_k''' , '''.time_mix_key''' ) # time_mix_v -> time_mix_value and reshape if name.endswith('''.time_mix_v''' ): A : Union[str, Any] = name.replace('''.time_mix_v''' , '''.time_mix_value''' ) # time_mix_r -> time_mix_key and reshape if name.endswith('''.time_mix_r''' ): A : Union[str, Any] = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' ) if name != "head.weight": A : List[Any] = '''rwkv.''' + name A : Dict = weight return state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=False , snake_case__=None ): '''simple docstring''' if tokenizer_file is None: print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' ) A : int = 5_0277 A : Optional[int] = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' ) else: A : str = PreTrainedTokenizerFast(tokenizer_file=snake_case__ ) A : Any = len(snake_case__ ) tokenizer.save_pretrained(snake_case__ ) # 2. Build the config A : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: A : List[str] = candidate break if size is None: raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' ) if size not in possible_sizes: raise ValueError(F'`size` should be one of {possible_sizes}, got {size}.' ) A : Any = RwkvConfig( vocab_size=snake_case__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(snake_case__ ) # 3. Download model file then convert state_dict A : Union[str, Any] = hf_hub_download(snake_case__ , snake_case__ ) A : Tuple = torch.load(snake_case__ , map_location='''cpu''' ) A : List[Any] = convert_state_dict(snake_case__ ) # 4. Split in shards and save A, A : List[str] = shard_checkpoint(snake_case__ ) for shard_file, shard in shards.items(): torch.save(snake_case__ , os.path.join(snake_case__ , snake_case__ ) ) if index is not None: A : Dict = os.path.join(snake_case__ , snake_case__ ) # Save the index as well with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f: A : List[Any] = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '''\n''' f.write(snake_case__ ) # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( '''Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.''' ) A : List[Any] = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: A : Union[str, Any] = torch.load(os.path.join(snake_case__ , snake_case__ ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(snake_case__ , snake_case__ ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' ) A : int = AutoModelForCausalLM.from_pretrained(snake_case__ ) model.push_to_hub(snake_case__ , max_shard_size='''2GB''' ) tokenizer.push_to_hub(snake_case__ ) if __name__ == "__main__": lowercase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.' ) parser.add_argument( '--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.' ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='Where to save the converted model.' ) parser.add_argument( '--tokenizer_file', default=None, type=str, help='Path to the tokenizer file to use (if not provided, only the model is converted).', ) parser.add_argument( '--size', default=None, type=str, help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Push to the Hub the converted model.', ) parser.add_argument( '--model_name', default=None, type=str, help='Name of the pushed model on the Hub, including the username / organization.', ) lowercase : Union[str, Any] = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
3
'''simple docstring''' from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class A ( __snake_case ): __magic_name__ = DistilBertTokenizer __magic_name__ = DistilBertTokenizerFast __magic_name__ = True @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : List[Any] = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' ) A : Dict = tokenizer.encode('''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE ) A : List[str] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE ) A : Optional[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE ) A : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
3
1
'''simple docstring''' import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration lowercase : Optional[Any] = 50_00_00 lowercase , lowercase : Union[str, Any] = os.path.split(__file__) lowercase : Any = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json')) @get_duration def lowerCAmelCase_ ( snake_case__ , **snake_case__ ): '''simple docstring''' A : Tuple = dataset.map(**snake_case__ ) @get_duration def lowerCAmelCase_ ( snake_case__ , **snake_case__ ): '''simple docstring''' A : str = dataset.filter(**snake_case__ ) def lowerCAmelCase_ ( ): '''simple docstring''' A : Dict = {'''num examples''': SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: A : str = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} ) A : List[Any] = generate_example_dataset( os.path.join(snake_case__ , '''dataset.arrow''' ) , snake_case__ , num_examples=snake_case__ ) A : Tuple = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=snake_case__ ) def tokenize(snake_case__ ): return tokenizer(examples['''text'''] ) A : Any = map(snake_case__ ) A : List[Any] = map(snake_case__ , batched=snake_case__ ) A : str = map(snake_case__ , function=lambda snake_case__ : None , batched=snake_case__ ) with dataset.formatted_as(type='''numpy''' ): A : Union[str, Any] = map(snake_case__ , function=lambda snake_case__ : None , batched=snake_case__ ) with dataset.formatted_as(type='''pandas''' ): A : int = map(snake_case__ , function=lambda snake_case__ : None , batched=snake_case__ ) with dataset.formatted_as(type='''torch''' , columns='''numbers''' ): A : List[str] = map(snake_case__ , function=lambda snake_case__ : None , batched=snake_case__ ) with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ): A : Optional[Any] = map(snake_case__ , function=lambda snake_case__ : None , batched=snake_case__ ) A : Any = map(snake_case__ , function=snake_case__ , batched=snake_case__ ) A : str = filter(snake_case__ ) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(snake_case__ , '''wb''' ) as f: f.write(json.dumps(snake_case__ ).encode('''utf-8''' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
3
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase : Optional[int] = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''input_features''', '''attention_mask'''] def __init__( self , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=16000 , SCREAMING_SNAKE_CASE=80 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" super().__init__(feature_size=SCREAMING_SNAKE_CASE , sampling_rate=SCREAMING_SNAKE_CASE , padding_value=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Optional[int] = num_mel_bins A : Tuple = do_ceptral_normalize A : Dict = normalize_means A : List[Any] = normalize_vars A : List[str] = True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , ) -> np.ndarray: """simple docstring""" A : List[Any] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers A : Any = torch.from_numpy(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) A : Any = ta_kaldi.fbank(SCREAMING_SNAKE_CASE , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 0.0 , ) -> np.ndarray: """simple docstring""" if normalize_means: A : Dict = x[:input_length].mean(axis=0 ) A : Optional[Any] = np.subtract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if normalize_vars: A : str = x[:input_length].std(axis=0 ) A : int = np.divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if input_length < x.shape[0]: A : List[str] = padding_value # make sure array is in float32 A : Tuple = x.astype(np.floataa ) return x def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[np.ndarray]: """simple docstring""" A : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with' F' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) A : List[Any] = isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}' ) A : Tuple = is_batched_numpy or ( isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ): A : Union[str, Any] = np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A : Any = [raw_speech] # extract fbank features A : List[str] = [self._extract_fbank_features(SCREAMING_SNAKE_CASE ) for waveform in raw_speech] # convert into correct format for padding A : str = BatchFeature({'''input_features''': features} ) A : Union[str, Any] = self.pad( SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) # make sure list is in array format A : List[str] = padded_inputs.get('''input_features''' ) if isinstance(input_features[0] , SCREAMING_SNAKE_CASE ): A : str = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features] A : Union[str, Any] = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: A : Dict = ( np.array(SCREAMING_SNAKE_CASE , dtype=np.intaa ) if self._get_padding_strategies(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE ) is not PaddingStrategy.DO_NOT_PAD else None ) A : List[Any] = self.normalize( padded_inputs['''input_features'''] , attention_mask=SCREAMING_SNAKE_CASE ) if return_tensors is not None: A : int = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE ) return padded_inputs
3
1
'''simple docstring''' from typing import Any def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' _validation( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) # Creates data structures and fill initial step A : dict = {} A : dict = {} for state in states_space: A : Tuple = observations_space[0] A : List[Any] = ( initial_probabilities[state] * emission_probabilities[state][observation] ) A : Any = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(snake_case__ ) ): A : Dict = observations_space[o] A : Any = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function A : List[Any] = '''''' A : Optional[Any] = -1 for k_state in states_space: A : Dict = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: A : Any = probability A : Union[str, Any] = k_state # Update probabilities and pointers dicts A : List[Any] = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) A : Tuple = arg_max # The final observation A : Union[str, Any] = observations_space[len(snake_case__ ) - 1] # argmax for given final observation A : Dict = '''''' A : int = -1 for k_state in states_space: A : Tuple = probabilities[(k_state, final_observation)] if probability > max_probability: A : Optional[int] = probability A : List[Any] = k_state A : Union[str, Any] = arg_max # Process pointers backwards A : int = last_state A : Optional[Any] = [] for o in range(len(snake_case__ ) - 1 , -1 , -1 ): result.append(snake_case__ ) A : Tuple = pointers[previous, observations_space[o]] result.reverse() return result def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' _validate_not_empty( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) _validate_lists(snake_case__ , snake_case__ ) _validate_dicts( snake_case__ , snake_case__ , snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError('''There\'s an empty parameter''' ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' _validate_list(snake_case__ , '''observations_space''' ) _validate_list(snake_case__ , '''states_space''' ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' if not isinstance(_object , snake_case__ ): A : List[str] = F'{var_name} must be a list' raise ValueError(snake_case__ ) else: for x in _object: if not isinstance(snake_case__ , snake_case__ ): A : Optional[int] = F'{var_name} must be a list of strings' raise ValueError(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' _validate_dict(snake_case__ , '''initial_probabilities''' , snake_case__ ) _validate_nested_dict(snake_case__ , '''transition_probabilities''' ) _validate_nested_dict(snake_case__ , '''emission_probabilities''' ) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' _validate_dict(_object , snake_case__ , snake_case__ ) for x in _object.values(): _validate_dict(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = False ): '''simple docstring''' if not isinstance(_object , snake_case__ ): A : Union[str, Any] = F'{var_name} must be a dict' raise ValueError(snake_case__ ) if not all(isinstance(snake_case__ , snake_case__ ) for x in _object ): A : Union[str, Any] = F'{var_name} all keys must be strings' raise ValueError(snake_case__ ) if not all(isinstance(snake_case__ , snake_case__ ) for x in _object.values() ): A : str = '''nested dictionary ''' if nested else '''''' A : List[str] = F'{var_name} {nested_text}all values must be {value_type.__name__}' raise ValueError(snake_case__ ) if __name__ == "__main__": from doctest import testmod testmod()
3
'''simple docstring''' import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 lowercase : str = get_tests_dir('fixtures/dummy_feature_extractor_config.json') lowercase : str = get_tests_dir('fixtures/vocab.json') lowercase : int = get_tests_dir('fixtures') class A ( unittest.TestCase ): __magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = 0 def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : List[Any] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : Union[str, Any] = WavaVecaConfig() A : List[str] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) # save in new folder model_config.save_pretrained(SCREAMING_SNAKE_CASE ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) ) A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : Dict = WavaVecaFeatureExtractor() A : List[str] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(SCREAMING_SNAKE_CASE ) # drop `processor_class` in tokenizer with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f: A : Dict = json.load(SCREAMING_SNAKE_CASE ) config_dict.pop('''processor_class''' ) with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE ) ) A : Optional[Any] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : List[Any] = WavaVecaFeatureExtractor() A : List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) A : str = WavaVecaProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(SCREAMING_SNAKE_CASE ) # drop `processor_class` in feature extractor with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''r''' ) as f: A : str = json.load(SCREAMING_SNAKE_CASE ) config_dict.pop('''processor_class''' ) with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE ) ) A : str = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A : str = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' ) model_config.save_pretrained(SCREAMING_SNAKE_CASE ) # copy relevant files copyfile(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''vocab.json''' ) ) # create emtpy sample processor with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' ) as f: f.write('''{}''' ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" with self.assertRaises(SCREAMING_SNAKE_CASE ): A : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(SCREAMING_SNAKE_CASE ): A : Union[str, Any] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) A : List[str] = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) A : Tuple = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version A : List[str] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE ) A : int = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(SCREAMING_SNAKE_CASE ): AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Now that the config is registered, it can be used as any other config with the auto-API A : List[Any] = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: A : Tuple = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : Optional[int] = CustomTokenizer(SCREAMING_SNAKE_CASE ) A : Any = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[str] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" class A ( __snake_case ): __magic_name__ = False class A ( __snake_case ): __magic_name__ = False class A ( __snake_case ): __magic_name__ = '''AutoFeatureExtractor''' __magic_name__ = '''AutoTokenizer''' __magic_name__ = False try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) AutoTokenizer.register(SCREAMING_SNAKE_CASE , slow_tokenizer_class=SCREAMING_SNAKE_CASE ) AutoProcessor.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # If remote code is not set, the default is to use local classes. A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. A : Optional[int] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. A : Tuple = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : int = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Optional[int] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' ) self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' ) @is_staging_test class A ( unittest.TestCase ): __magic_name__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def __lowerCAmelCase ( cls ) -> Dict: """simple docstring""" A : Optional[int] = TOKEN HfFolder.save_token(SCREAMING_SNAKE_CASE ) @classmethod def __lowerCAmelCase ( cls ) -> Any: """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' ) except HTTPError: pass def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Union[str, Any] = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(SCREAMING_SNAKE_CASE , '''test-processor''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token ) A : int = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Tuple = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(SCREAMING_SNAKE_CASE , '''test-processor-org''' ) , push_to_hub=SCREAMING_SNAKE_CASE , use_auth_token=self._token , organization='''valid_org''' , ) A : int = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() A : Any = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: A : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A : str = CustomTokenizer(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = CustomProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(F'{USER}/test-dynamic-processor' , token=self._token ) A : List[str] = Repository(SCREAMING_SNAKE_CASE , clone_from=F'{USER}/test-dynamic-processor' , token=self._token ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { '''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''', '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(SCREAMING_SNAKE_CASE , '''tokenizer_config.json''' ) ) as f: A : Dict = json.load(SCREAMING_SNAKE_CASE ) self.assertDictEqual( tokenizer_config['''auto_map'''] , { '''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None], '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_feature_extraction.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_tokenization.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , '''custom_processing.py''' ) ) ) repo.push_to_hub() A : Optional[int] = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' , trust_remote_code=SCREAMING_SNAKE_CASE ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
3
1
'''simple docstring''' def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = len(snake_case__ ) A : Dict = [[0] * n for i in range(snake_case__ )] for i in range(snake_case__ ): A : int = y_points[i] for i in range(2 , snake_case__ ): for j in range(snake_case__ , snake_case__ ): A : Tuple = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
3
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: lowercase : Optional[Any] = None lowercase : Tuple = logging.get_logger(__name__) lowercase : Dict = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'} lowercase : Tuple = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, 'tokenizer_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json', }, } lowercase : List[str] = { 'google/rembert': 2_56, } lowercase : Dict = '▁' class A ( __snake_case ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = RemBertTokenizer def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[MASK]" , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" A : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token super().__init__( SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) A : List[Any] = do_lower_case A : str = remove_space A : int = keep_accents A : Union[str, Any] = vocab_file A : List[Any] = False if not self.vocab_file else True def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : List[Any] = [self.sep_token_id] A : Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: """simple docstring""" A : Tuple = [self.sep_token_id] A : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error('''Vocabulary path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE ) ) return A : Any = os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
3
1